1 use alloc::boxed::Box;
2 use core::alloc::Layout;
3 use core::borrow::{Borrow, BorrowMut};
4 use core::cmp;
5 use core::fmt;
6 use core::marker::PhantomData;
7 use core::mem::{self, MaybeUninit};
8 use core::ops::{Deref, DerefMut};
9 use core::ptr;
10 use core::slice;
11 
12 use crate::guard::Guard;
13 use crate::primitive::sync::atomic::{AtomicUsize, Ordering};
14 use crossbeam_utils::atomic::AtomicConsume;
15 
16 /// Given an ordering for the success case in a compare-exchange operation, returns the strongest
17 /// appropriate ordering for the failure case.
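///
/// For example, `SeqCst` stays `SeqCst` on failure, while `AcqRel` is downgraded to `Acquire`,
/// since a failure ordering may not contain a release component.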
18 #[inline]
19 fn strongest_failure_ordering(ord: Ordering) -> Ordering {
20     use self::Ordering::*;
21     match ord {
22         Relaxed | Release => Relaxed,
23         Acquire | AcqRel => Acquire,
24         _ => SeqCst,
25     }
26 }
27 
28 /// The error returned on a failed compare-and-set operation.
29 // TODO: remove in the next major version.
30 #[deprecated(note = "Use `CompareExchangeError` instead")]
31 pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>;
32 
33 /// The error returned on a failed compare-exchange operation.
34 pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
35     /// The value in the atomic pointer at the time of the failed operation.
36     pub current: Shared<'g, T>,
37 
38     /// The new value, which the operation failed to store.
39     pub new: P,
40 }
41 
42 impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> {
43     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
44         f.debug_struct("CompareExchangeError")
45             .field("current", &self.current)
46             .field("new", &self.new)
47             .finish()
48     }
49 }
50 
51 /// Memory orderings for compare-and-set operations.
52 ///
53 /// A compare-and-set operation can have different memory orderings depending on whether it
54 /// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
55 ///
56 /// The two ways of specifying orderings for compare-and-set are:
57 ///
58 /// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
59 ///    ordering is chosen.
60 /// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
61 ///    for the failure case.
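///
/// For example, passing a bare `SeqCst` uses `SeqCst` for both the success and the failure case,
/// while the pair `(AcqRel, Acquire)` uses `AcqRel` on success and `Acquire` on failure.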
62 // TODO: remove in the next major version.
63 #[deprecated(
64     note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \
65             use `compare_exchange` or `compare_exchange_weak` instead"
66 )]
67 pub trait CompareAndSetOrdering {
68     /// The ordering of the operation when it succeeds.
69     fn success(&self) -> Ordering;
70 
71     /// The ordering of the operation when it fails.
72     ///
73     /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker than
74     /// the success ordering.
75     fn failure(&self) -> Ordering;
76 }
77 
78 #[allow(deprecated)]
79 impl CompareAndSetOrdering for Ordering {
80     #[inline]
81     fn success(&self) -> Ordering {
82         *self
83     }
84 
85     #[inline]
86     fn failure(&self) -> Ordering {
87         strongest_failure_ordering(*self)
88     }
89 }
90 
91 #[allow(deprecated)]
92 impl CompareAndSetOrdering for (Ordering, Ordering) {
93     #[inline]
94     fn success(&self) -> Ordering {
95         self.0
96     }
97 
98     #[inline]
99     fn failure(&self) -> Ordering {
100         self.1
101     }
102 }
103 
104 /// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
105 #[inline]
106 fn low_bits<T: ?Sized + Pointable>() -> usize {
107     (1 << T::ALIGN.trailing_zeros()) - 1
108 }
109 
110 /// Panics if the pointer is not properly aligned.
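///
/// For example, `ensure_aligned::<u32>(0x1003)` panics (the two low bits are set), while
/// `ensure_aligned::<u32>(0x1004)` passes, assuming `u32` is 4-byte aligned.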
111 #[inline]
112 fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) {
113     assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer");
114 }
115 
116 /// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
117 ///
118 /// `tag` is truncated to fit into the unused bits of the pointer to `T`.
119 #[inline]
120 fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize {
121     (data & !low_bits::<T>()) | (tag & low_bits::<T>())
122 }
123 
124 /// Decomposes a tagged pointer `data` into the pointer and the tag.
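///
/// For example, for a pointee with `ALIGN == 8`, `compose_tag::<T>(p, 0b101)` sets the three low
/// bits of `p`, and `decompose_tag::<T>` on the result yields `(p, 0b101)` again.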
125 #[inline]
126 fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) {
127     (data & !low_bits::<T>(), data & low_bits::<T>())
128 }
129 
130 /// Types that are pointed to by a single word.
131 ///
132 /// In concurrent programming, it is necessary to represent an object within a word because atomic
133 /// operations (e.g., reads, writes, read-modify-writes) support only single words.  This trait
134 /// qualifies such types that are pointed to by a single word.
135 ///
136 /// The trait generalizes `Box<T>` for a sized type `T`.  In a box, an object of type `T` is
137 /// allocated on the heap and owned by a single-word pointer.  This trait is also implemented for
138 /// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array
139 /// size and elements.
140 ///
141 /// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`].  In
142 /// particular, Crossbeam supports dynamically sized slices as follows.
143 ///
144 /// ```
145 /// use std::mem::MaybeUninit;
146 /// use crossbeam_epoch::Owned;
147 ///
148 /// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10]
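/// assert_eq!(o.len(), 10); // the slice derefs to 10 (as yet uninitialized) elements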
149 /// ```
150 pub trait Pointable {
151     /// The alignment of the pointee.
152     const ALIGN: usize;
153 
154     /// The type for initializers.
155     type Init;
156 
157     /// Initializes a value with the given initializer, returning a pointer to it as a `usize`.
158     ///
159     /// # Safety
160     ///
161     /// The result should be a multiple of `ALIGN`.
162     unsafe fn init(init: Self::Init) -> usize;
163 
164     /// Dereferences the given pointer.
165     ///
166     /// # Safety
167     ///
168     /// - The given `ptr` should have been initialized with [`Pointable::init`].
169     /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
170     /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently.
171     unsafe fn deref<'a>(ptr: usize) -> &'a Self;
172 
173     /// Mutably dereferences the given pointer.
174     ///
175     /// # Safety
176     ///
177     /// - The given `ptr` should have been initialized with [`Pointable::init`].
178     /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
179     /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
180     ///   concurrently.
181     unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self;
182 
183     /// Drops the object pointed to by the given pointer.
184     ///
185     /// # Safety
186     ///
187     /// - The given `ptr` should have been initialized with [`Pointable::init`].
188     /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
189     /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
190     ///   concurrently.
191     unsafe fn drop(ptr: usize);
192 }
193 
194 impl<T> Pointable for T {
195     const ALIGN: usize = mem::align_of::<T>();
196 
197     type Init = T;
198 
199     unsafe fn init(init: Self::Init) -> usize {
200         Box::into_raw(Box::new(init)) as usize
201     }
202 
203     unsafe fn deref<'a>(ptr: usize) -> &'a Self {
204         &*(ptr as *const T)
205     }
206 
207     unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
208         &mut *(ptr as *mut T)
209     }
210 
211     unsafe fn drop(ptr: usize) {
212         drop(Box::from_raw(ptr as *mut T));
213     }
214 }
215 
216 /// Array with size.
217 ///
218 /// # Memory layout
219 ///
220 /// An array consisting of size and elements:
221 ///
222 /// ```text
223 ///          elements
224 ///          |
225 ///          |
226 /// ------------------------------------
227 /// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
228 /// ------------------------------------
229 /// ```
230 ///
231 /// Its memory layout is different from that of `Box<[T]>` in that size is in the allocation (not
232 /// along with pointer as in `Box<[T]>`).
233 ///
234 /// Elements are not present in the type, but they will be in the allocation.
236 #[repr(C)]
237 struct Array<T> {
238     /// The number of elements (not the number of bytes).
239     len: usize,
240     elements: [MaybeUninit<T>; 0],
241 }
242 
243 impl<T> Array<T> {
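    /// Returns the layout of an allocation holding `len` elements: the `len` field followed by
    /// the elements, padded to the alignment of `Array<T>`. For example, on a 64-bit target
    /// `Array::<u32>::layout(3)` is 8 bytes of `len` plus 12 bytes of elements, padded to 24 bytes.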
244     fn layout(len: usize) -> Layout {
245         Layout::new::<Self>()
246             .extend(Layout::array::<MaybeUninit<T>>(len).unwrap())
247             .unwrap()
248             .0
249             .pad_to_align()
250     }
251 }
252 
253 impl<T> Pointable for [MaybeUninit<T>] {
254     const ALIGN: usize = mem::align_of::<Array<T>>();
255 
256     type Init = usize;
257 
258     unsafe fn init(len: Self::Init) -> usize {
259         let layout = Array::<T>::layout(len);
260         let ptr = alloc::alloc::alloc(layout).cast::<Array<T>>();
261         if ptr.is_null() {
262             alloc::alloc::handle_alloc_error(layout);
263         }
264         ptr::addr_of_mut!((*ptr).len).write(len);
265         ptr as usize
266     }
267 
268     unsafe fn deref<'a>(ptr: usize) -> &'a Self {
269         let array = &*(ptr as *const Array<T>);
270         slice::from_raw_parts(array.elements.as_ptr() as *const _, array.len)
271     }
272 
273     unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
274         let array = &*(ptr as *mut Array<T>);
275         slice::from_raw_parts_mut(array.elements.as_ptr() as *mut _, array.len)
276     }
277 
278     unsafe fn drop(ptr: usize) {
279         let len = (*(ptr as *mut Array<T>)).len;
280         let layout = Array::<T>::layout(len);
281         alloc::alloc::dealloc(ptr as *mut u8, layout);
282     }
283 }
284 
285 /// An atomic pointer that can be safely shared between threads.
286 ///
287 /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
288 /// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
289 /// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
290 ///
291 /// Any method that loads the pointer must be passed a reference to a [`Guard`].
292 ///
293 /// Crossbeam supports dynamically sized types.  See [`Pointable`] for details.
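///
/// # Examples
///
/// Storing a small tag alongside the pointer (a sketch; any tag smaller than the pointee's
/// alignment is preserved):
///
/// ```
/// use crossbeam_epoch::{Atomic, Owned};
///
/// let a = Atomic::from(Owned::new(0u64).with_tag(2));
/// # unsafe { assert_eq!(a.into_owned().tag(), 2); } // also avoids a leak
/// ```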
294 pub struct Atomic<T: ?Sized + Pointable> {
295     data: AtomicUsize,
296     _marker: PhantomData<*mut T>,
297 }
298 
299 unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {}
300 unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {}
301 
302 impl<T> Atomic<T> {
303     /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
304     ///
305     /// # Examples
306     ///
307     /// ```
308     /// use crossbeam_epoch::Atomic;
309     ///
310     /// let a = Atomic::new(1234);
311     /// # unsafe { drop(a.into_owned()); } // avoid leak
312     /// ```
313     pub fn new(init: T) -> Atomic<T> {
314         Self::init(init)
315     }
316 }
317 
318 impl<T: ?Sized + Pointable> Atomic<T> {
319     /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
320     ///
321     /// # Examples
322     ///
323     /// ```
324     /// use crossbeam_epoch::Atomic;
325     ///
326     /// let a = Atomic::<i32>::init(1234);
327     /// # unsafe { drop(a.into_owned()); } // avoid leak
328     /// ```
329     pub fn init(init: T::Init) -> Atomic<T> {
330         Self::from(Owned::init(init))
331     }
332 
333     /// Returns a new atomic pointer pointing to the tagged pointer `data`.
334     fn from_usize(data: usize) -> Self {
335         Self {
336             data: AtomicUsize::new(data),
337             _marker: PhantomData,
338         }
339     }
340 
341     /// Returns a new null atomic pointer.
342     ///
343     /// # Examples
344     ///
345     /// ```
346     /// use crossbeam_epoch::Atomic;
347     ///
348     /// let a = Atomic::<i32>::null();
349     /// ```
350     #[cfg(not(crossbeam_loom))]
351     pub const fn null() -> Atomic<T> {
352         Self {
353             data: AtomicUsize::new(0),
354             _marker: PhantomData,
355         }
356     }
357     /// Returns a new null atomic pointer.
358     #[cfg(crossbeam_loom)]
359     pub fn null() -> Atomic<T> {
360         Self {
361             data: AtomicUsize::new(0),
362             _marker: PhantomData,
363         }
364     }
365 
366     /// Loads a `Shared` from the atomic pointer.
367     ///
368     /// This method takes an [`Ordering`] argument which describes the memory ordering of this
369     /// operation.
370     ///
371     /// # Examples
372     ///
373     /// ```
374     /// use crossbeam_epoch::{self as epoch, Atomic};
375     /// use std::sync::atomic::Ordering::SeqCst;
376     ///
377     /// let a = Atomic::new(1234);
378     /// let guard = &epoch::pin();
379     /// let p = a.load(SeqCst, guard);
380     /// # unsafe { drop(a.into_owned()); } // avoid leak
381     /// ```
382     pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
383         unsafe { Shared::from_usize(self.data.load(ord)) }
384     }
385 
386     /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
387     ///
388     /// This is similar to the "acquire" ordering, except that an ordering is
389     /// only guaranteed with operations that "depend on" the result of the load.
390     /// However, consume loads are usually much faster than acquire loads on
391     /// architectures with a weak memory model since they don't require memory
392     /// fence instructions.
393     ///
394     /// The exact definition of "depend on" is a bit vague, but it works as you
395     /// would expect in practice since a lot of software, especially the Linux
396     /// kernel, relies on this behavior.
397     ///
398     /// # Examples
399     ///
400     /// ```
401     /// use crossbeam_epoch::{self as epoch, Atomic};
402     ///
403     /// let a = Atomic::new(1234);
404     /// let guard = &epoch::pin();
405     /// let p = a.load_consume(guard);
406     /// # unsafe { drop(a.into_owned()); } // avoid leak
407     /// ```
408     pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
409         unsafe { Shared::from_usize(self.data.load_consume()) }
410     }
411 
412     /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
413     ///
414     /// This method takes an [`Ordering`] argument which describes the memory ordering of this
415     /// operation.
416     ///
417     /// # Examples
418     ///
419     /// ```
420     /// use crossbeam_epoch::{Atomic, Owned, Shared};
421     /// use std::sync::atomic::Ordering::SeqCst;
422     ///
423     /// let a = Atomic::new(1234);
424     /// # unsafe { drop(a.load(SeqCst, &crossbeam_epoch::pin()).into_owned()); } // avoid leak
425     /// a.store(Shared::null(), SeqCst);
426     /// a.store(Owned::new(1234), SeqCst);
427     /// # unsafe { drop(a.into_owned()); } // avoid leak
428     /// ```
429     pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) {
430         self.data.store(new.into_usize(), ord);
431     }
432 
433     /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
434     /// `Shared`.
435     ///
436     /// This method takes an [`Ordering`] argument which describes the memory ordering of this
437     /// operation.
438     ///
439     /// # Examples
440     ///
441     /// ```
442     /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
443     /// use std::sync::atomic::Ordering::SeqCst;
444     ///
445     /// let a = Atomic::new(1234);
446     /// let guard = &epoch::pin();
447     /// let p = a.swap(Shared::null(), SeqCst, guard);
448     /// # unsafe { drop(p.into_owned()); } // avoid leak
449     /// ```
450     pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
451         unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
452     }
453 
454     /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
455     /// value is the same as `current`. The tag is also taken into account, so two pointers to the
456     /// same object, but with different tags, will not be considered equal.
457     ///
458     /// The return value is a result indicating whether the new pointer was written. On success the
459     /// pointer that was written is returned. On failure the actual current value and `new` are
460     /// returned.
461     ///
462     /// This method takes two `Ordering` arguments to describe the memory
463     /// ordering of this operation. `success` describes the required ordering for the
464     /// read-modify-write operation that takes place if the comparison with `current` succeeds.
465     /// `failure` describes the required ordering for the load operation that takes place when
466     /// the comparison fails. Using `Acquire` as success ordering makes the store part
467     /// of this operation `Relaxed`, and using `Release` makes the successful load
468     /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
469     /// and must be equivalent to or weaker than the success ordering.
470     ///
471     /// # Examples
472     ///
473     /// ```
474     /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
475     /// use std::sync::atomic::Ordering::SeqCst;
476     ///
477     /// let a = Atomic::new(1234);
478     ///
479     /// let guard = &epoch::pin();
480     /// let curr = a.load(SeqCst, guard);
481     /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
482     /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard);
483     /// # unsafe { drop(curr.into_owned()); } // avoid leak
484     /// ```
485     pub fn compare_exchange<'g, P>(
486         &self,
487         current: Shared<'_, T>,
488         new: P,
489         success: Ordering,
490         failure: Ordering,
491         _: &'g Guard,
492     ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
493     where
494         P: Pointer<T>,
495     {
496         let new = new.into_usize();
497         self.data
498             .compare_exchange(current.into_usize(), new, success, failure)
499             .map(|_| unsafe { Shared::from_usize(new) })
500             .map_err(|current| unsafe {
501                 CompareExchangeError {
502                     current: Shared::from_usize(current),
503                     new: P::from_usize(new),
504                 }
505             })
506     }
507 
508     /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
509     /// value is the same as `current`. The tag is also taken into account, so two pointers to the
510     /// same object, but with different tags, will not be considered equal.
511     ///
512     /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison
513     /// succeeds, which can result in more efficient code on some platforms.  The return value is a
514     /// result indicating whether the new pointer was written. On success the pointer that was
515     /// written is returned. On failure the actual current value and `new` are returned.
516     ///
517     /// This method takes two `Ordering` arguments to describe the memory
518     /// ordering of this operation. `success` describes the required ordering for the
519     /// read-modify-write operation that takes place if the comparison with `current` succeeds.
520     /// `failure` describes the required ordering for the load operation that takes place when
521     /// the comparison fails. Using `Acquire` as success ordering makes the store part
522     /// of this operation `Relaxed`, and using `Release` makes the successful load
523     /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
524     /// and must be equivalent to or weaker than the success ordering.
525     ///
526     /// [`compare_exchange`]: Atomic::compare_exchange
527     ///
528     /// # Examples
529     ///
530     /// ```
531     /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
532     /// use std::sync::atomic::Ordering::SeqCst;
533     ///
534     /// let a = Atomic::new(1234);
535     /// let guard = &epoch::pin();
536     ///
537     /// let mut new = Owned::new(5678);
538     /// let mut ptr = a.load(SeqCst, guard);
539     /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak
540     /// loop {
541     ///     match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) {
542     ///         Ok(p) => {
543     ///             ptr = p;
544     ///             break;
545     ///         }
546     ///         Err(err) => {
547     ///             ptr = err.current;
548     ///             new = err.new;
549     ///         }
550     ///     }
551     /// }
552     ///
553     /// let mut curr = a.load(SeqCst, guard);
554     /// loop {
555     ///     match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) {
556     ///         Ok(_) => break,
557     ///         Err(err) => curr = err.current,
558     ///     }
559     /// }
560     /// # unsafe { drop(curr.into_owned()); } // avoid leak
561     /// ```
562     pub fn compare_exchange_weak<'g, P>(
563         &self,
564         current: Shared<'_, T>,
565         new: P,
566         success: Ordering,
567         failure: Ordering,
568         _: &'g Guard,
569     ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
570     where
571         P: Pointer<T>,
572     {
573         let new = new.into_usize();
574         self.data
575             .compare_exchange_weak(current.into_usize(), new, success, failure)
576             .map(|_| unsafe { Shared::from_usize(new) })
577             .map_err(|current| unsafe {
578                 CompareExchangeError {
579                     current: Shared::from_usize(current),
580                     new: P::from_usize(new),
581                 }
582             })
583     }
584 
585     /// Fetches the pointer, and then applies a function to it that returns a new value.
586     /// Returns `Ok` with the pointer that was written if the update succeeds, or `Err(_)` with
    /// the last observed value if the function returns `None`.
587     ///
588     /// Note that the given function may be called multiple times if the value has been changed by
589     /// other threads in the meantime, as long as the function returns `Some(_)`, but the function
590     /// will have been applied only once to the stored value.
591     ///
592     /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
593     /// ordering of this operation. The first describes the required ordering for
594     /// when the operation finally succeeds while the second describes the
595     /// required ordering for loads. These correspond to the success and failure
596     /// orderings of [`Atomic::compare_exchange`] respectively.
597     ///
598     /// Using [`Acquire`] as success ordering makes the store part of this
599     /// operation [`Relaxed`], and using [`Release`] makes the final successful
600     /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
601     /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
602     /// success ordering.
603     ///
604     /// [`Relaxed`]: Ordering::Relaxed
605     /// [`Acquire`]: Ordering::Acquire
606     /// [`Release`]: Ordering::Release
607     /// [`SeqCst`]: Ordering::SeqCst
608     ///
609     /// # Examples
610     ///
611     /// ```
612     /// use crossbeam_epoch::{self as epoch, Atomic};
613     /// use std::sync::atomic::Ordering::SeqCst;
614     ///
615     /// let a = Atomic::new(1234);
616     /// let guard = &epoch::pin();
617     ///
618     /// let res1 = a.fetch_update(SeqCst, SeqCst, guard, |x| Some(x.with_tag(1)));
619     /// assert!(res1.is_ok());
620     ///
621     /// let res2 = a.fetch_update(SeqCst, SeqCst, guard, |x| None);
622     /// assert!(res2.is_err());
623     /// # unsafe { drop(a.into_owned()); } // avoid leak
624     /// ```
625     pub fn fetch_update<'g, F>(
626         &self,
627         set_order: Ordering,
628         fail_order: Ordering,
629         guard: &'g Guard,
630         mut func: F,
631     ) -> Result<Shared<'g, T>, Shared<'g, T>>
632     where
633         F: FnMut(Shared<'g, T>) -> Option<Shared<'g, T>>,
634     {
635         let mut prev = self.load(fail_order, guard);
636         while let Some(next) = func(prev) {
637             match self.compare_exchange_weak(prev, next, set_order, fail_order, guard) {
638                 Ok(shared) => return Ok(shared),
639                 Err(next_prev) => prev = next_prev.current,
640             }
641         }
642         Err(prev)
643     }
644 
645     /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
646     /// value is the same as `current`. The tag is also taken into account, so two pointers to the
647     /// same object, but with different tags, will not be considered equal.
648     ///
649     /// The return value is a result indicating whether the new pointer was written. On success the
650     /// pointer that was written is returned. On failure the actual current value and `new` are
651     /// returned.
652     ///
653     /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
654     /// ordering of this operation.
655     ///
656     /// # Migrating to `compare_exchange`
657     ///
658     /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for
659     /// memory orderings:
660     ///
661     /// Original | Success | Failure
662     /// -------- | ------- | -------
663     /// Relaxed  | Relaxed | Relaxed
664     /// Acquire  | Acquire | Acquire
665     /// Release  | Release | Relaxed
666     /// AcqRel   | AcqRel  | Acquire
667     /// SeqCst   | SeqCst  | SeqCst
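    ///
    /// For example, `a.compare_and_set(curr, new, SeqCst, guard)` migrates to
    /// `a.compare_exchange(curr, new, SeqCst, SeqCst, guard)`.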
668     ///
669     /// # Examples
670     ///
671     /// ```
672     /// # #![allow(deprecated)]
673     /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
674     /// use std::sync::atomic::Ordering::SeqCst;
675     ///
676     /// let a = Atomic::new(1234);
677     ///
678     /// let guard = &epoch::pin();
679     /// let curr = a.load(SeqCst, guard);
680     /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
681     /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
682     /// # unsafe { drop(curr.into_owned()); } // avoid leak
683     /// ```
684     // TODO: remove in the next major version.
685     #[allow(deprecated)]
686     #[deprecated(note = "Use `compare_exchange` instead")]
687     pub fn compare_and_set<'g, O, P>(
688         &self,
689         current: Shared<'_, T>,
690         new: P,
691         ord: O,
692         guard: &'g Guard,
693     ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
694     where
695         O: CompareAndSetOrdering,
696         P: Pointer<T>,
697     {
698         self.compare_exchange(current, new, ord.success(), ord.failure(), guard)
699     }
700 
701     /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
702     /// value is the same as `current`. The tag is also taken into account, so two pointers to the
703     /// same object, but with different tags, will not be considered equal.
704     ///
705     /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
706     /// succeeds, which can result in more efficient code on some platforms.  The return value is a
707     /// result indicating whether the new pointer was written. On success the pointer that was
708     /// written is returned. On failure the actual current value and `new` are returned.
709     ///
710     /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
711     /// ordering of this operation.
712     ///
713     /// [`compare_and_set`]: Atomic::compare_and_set
714     ///
715     /// # Migrating to `compare_exchange_weak`
716     ///
717     /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for
718     /// memory orderings:
719     ///
720     /// Original | Success | Failure
721     /// -------- | ------- | -------
722     /// Relaxed  | Relaxed | Relaxed
723     /// Acquire  | Acquire | Acquire
724     /// Release  | Release | Relaxed
725     /// AcqRel   | AcqRel  | Acquire
726     /// SeqCst   | SeqCst  | SeqCst
727     ///
728     /// # Examples
729     ///
730     /// ```
731     /// # #![allow(deprecated)]
732     /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
733     /// use std::sync::atomic::Ordering::SeqCst;
734     ///
735     /// let a = Atomic::new(1234);
736     /// let guard = &epoch::pin();
737     ///
738     /// let mut new = Owned::new(5678);
739     /// let mut ptr = a.load(SeqCst, guard);
740     /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak
741     /// loop {
742     ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
743     ///         Ok(p) => {
744     ///             ptr = p;
745     ///             break;
746     ///         }
747     ///         Err(err) => {
748     ///             ptr = err.current;
749     ///             new = err.new;
750     ///         }
751     ///     }
752     /// }
753     ///
754     /// let mut curr = a.load(SeqCst, guard);
755     /// loop {
756     ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
757     ///         Ok(_) => break,
758     ///         Err(err) => curr = err.current,
759     ///     }
760     /// }
761     /// # unsafe { drop(curr.into_owned()); } // avoid leak
762     /// ```
763     // TODO: remove in the next major version.
764     #[allow(deprecated)]
765     #[deprecated(note = "Use `compare_exchange_weak` instead")]
766     pub fn compare_and_set_weak<'g, O, P>(
767         &self,
768         current: Shared<'_, T>,
769         new: P,
770         ord: O,
771         guard: &'g Guard,
772     ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
773     where
774         O: CompareAndSetOrdering,
775         P: Pointer<T>,
776     {
777         self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard)
778     }
779 
780     /// Bitwise "and" with the current tag.
781     ///
782     /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
783     /// new tag to the result. Returns the previous pointer.
784     ///
785     /// This method takes an [`Ordering`] argument which describes the memory ordering of this
786     /// operation.
787     ///
788     /// # Examples
789     ///
790     /// ```
791     /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
792     /// use std::sync::atomic::Ordering::SeqCst;
793     ///
794     /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
795     /// let guard = &epoch::pin();
796     /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
797     /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
798     /// ```
799     pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
800         unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
801     }
802 
803     /// Bitwise "or" with the current tag.
804     ///
805     /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
806     /// new tag to the result. Returns the previous pointer.
807     ///
808     /// This method takes an [`Ordering`] argument which describes the memory ordering of this
809     /// operation.
810     ///
811     /// # Examples
812     ///
813     /// ```
814     /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
815     /// use std::sync::atomic::Ordering::SeqCst;
816     ///
817     /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
818     /// let guard = &epoch::pin();
819     /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
820     /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
821     /// ```
822     pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
823         unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
824     }
825 
826     /// Bitwise "xor" with the current tag.
827     ///
828     /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
829     /// new tag to the result. Returns the previous pointer.
830     ///
831     /// This method takes an [`Ordering`] argument which describes the memory ordering of this
832     /// operation.
833     ///
834     /// # Examples
835     ///
836     /// ```
837     /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
838     /// use std::sync::atomic::Ordering::SeqCst;
839     ///
840     /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
841     /// let guard = &epoch::pin();
842     /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
843     /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
844     /// ```
845     pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
846         unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
847     }
848 
849     /// Takes ownership of the pointee.
850     ///
851     /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
852     /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
853     /// destructors of data structures.
854     ///
855     /// # Panics
856     ///
857     /// Panics if this pointer is null, but only in debug mode.
858     ///
859     /// # Safety
860     ///
861     /// This method may be called only if the pointer is valid and nobody else is holding a
862     /// reference to the same object.
863     ///
864     /// # Examples
865     ///
866     /// ```rust
867     /// # use std::mem;
868     /// # use crossbeam_epoch::Atomic;
869     /// struct DataStructure {
870     ///     ptr: Atomic<usize>,
871     /// }
872     ///
873     /// impl Drop for DataStructure {
874     ///     fn drop(&mut self) {
875     ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
876     ///         // any Shared or & to it ourselves.
877     ///         unsafe {
878     ///             drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
879     ///         }
880     ///     }
881     /// }
882     /// ```
883     pub unsafe fn into_owned(self) -> Owned<T> {
884         Owned::from_usize(self.data.into_inner())
885     }
886 
887     /// Takes ownership of the pointee if it is non-null.
888     ///
889     /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
890     /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
891     /// destructors of data structures.
892     ///
893     /// # Safety
894     ///
895     /// This method may be called only if the pointer is valid and nobody else is holding a
896     /// reference to the same object, or the pointer is null.
897     ///
898     /// # Examples
899     ///
900     /// ```rust
901     /// # use std::mem;
902     /// # use crossbeam_epoch::Atomic;
903     /// struct DataStructure {
904     ///     ptr: Atomic<usize>,
905     /// }
906     ///
907     /// impl Drop for DataStructure {
908     ///     fn drop(&mut self) {
909     ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
910     ///         // any Shared or & to it ourselves, but it may be null, so we have to be careful.
911     ///         let old = mem::replace(&mut self.ptr, Atomic::null());
912     ///         unsafe {
913     ///             if let Some(x) = old.try_into_owned() {
914     ///                 drop(x)
915     ///             }
916     ///         }
917     ///     }
918     /// }
919     /// ```
920     pub unsafe fn try_into_owned(self) -> Option<Owned<T>> {
921         let data = self.data.into_inner();
922         if decompose_tag::<T>(data).0 == 0 {
923             None
924         } else {
925             Some(Owned::from_usize(data))
926         }
927     }
928 }
929 
930 impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
931     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
932         let data = self.data.load(Ordering::SeqCst);
933         let (raw, tag) = decompose_tag::<T>(data);
934 
935         f.debug_struct("Atomic")
936             .field("raw", &raw)
937             .field("tag", &tag)
938             .finish()
939     }
940 }
941 
942 impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> {
943     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
944         let data = self.data.load(Ordering::SeqCst);
945         let (raw, _) = decompose_tag::<T>(data);
946         fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f)
947     }
948 }
949 
950 impl<T: ?Sized + Pointable> Clone for Atomic<T> {
951     /// Returns a copy of the atomic value.
952     ///
953     /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
954     /// atomics or fences.
955     fn clone(&self) -> Self {
956         let data = self.data.load(Ordering::Relaxed);
957         Atomic::from_usize(data)
958     }
959 }
960 
961 impl<T: ?Sized + Pointable> Default for Atomic<T> {
962     fn default() -> Self {
963         Atomic::null()
964     }
965 }
966 
967 impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
968     /// Returns a new atomic pointer pointing to `owned`.
969     ///
970     /// # Examples
971     ///
972     /// ```
973     /// use crossbeam_epoch::{Atomic, Owned};
974     ///
975     /// let a = Atomic::<i32>::from(Owned::new(1234));
976     /// # unsafe { drop(a.into_owned()); } // avoid leak
977     /// ```
978     fn from(owned: Owned<T>) -> Self {
979         let data = owned.data;
980         mem::forget(owned);
981         Self::from_usize(data)
982     }
983 }
984 
985 impl<T> From<Box<T>> for Atomic<T> {
986     fn from(b: Box<T>) -> Self {
987         Self::from(Owned::from(b))
988     }
989 }
990 
991 impl<T> From<T> for Atomic<T> {
992     fn from(t: T) -> Self {
993         Self::new(t)
994     }
995 }
996 
997 impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> {
998     /// Returns a new atomic pointer pointing to `ptr`.
999     ///
1000     /// # Examples
1001     ///
1002     /// ```
1003     /// use crossbeam_epoch::{Atomic, Shared};
1004     ///
1005     /// let a = Atomic::<i32>::from(Shared::<i32>::null());
1006     /// ```
1007     fn from(ptr: Shared<'g, T>) -> Self {
1008         Self::from_usize(ptr.data)
1009     }
1010 }
1011 
1012 impl<T> From<*const T> for Atomic<T> {
1013     /// Returns a new atomic pointer pointing to `raw`.
1014     ///
1015     /// # Examples
1016     ///
1017     /// ```
1018     /// use std::ptr;
1019     /// use crossbeam_epoch::Atomic;
1020     ///
1021     /// let a = Atomic::<i32>::from(ptr::null::<i32>());
1022     /// ```
1023     fn from(raw: *const T) -> Self {
1024         Self::from_usize(raw as usize)
1025     }
1026 }
1027 
1028 /// A trait for either `Owned` or `Shared` pointers.
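///
/// # Examples
///
/// A round trip through the machine representation (a sketch of the intended contract):
///
/// ```
/// use crossbeam_epoch::{Owned, Pointer};
///
/// let o = Owned::new(1234);
/// let data = o.into_usize();
/// // `data` must be converted back exactly once.
/// let o = unsafe { Owned::<i32>::from_usize(data) };
/// assert_eq!(*o, 1234);
/// ```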
1029 pub trait Pointer<T: ?Sized + Pointable> {
1030     /// Returns the machine representation of the pointer.
1031     fn into_usize(self) -> usize;
1032 
1033     /// Returns a new pointer pointing to the tagged pointer `data`.
1034     ///
1035     /// # Safety
1036     ///
1037     /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
1038     /// not be converted back by `Pointer::from_usize()` multiple times.
1039     unsafe fn from_usize(data: usize) -> Self;
1040 }
1041 
1042 /// An owned heap-allocated object.
1043 ///
1044 /// This type is very similar to `Box<T>`.
1045 ///
1046 /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
1047 /// least significant bits of the address.
1048 pub struct Owned<T: ?Sized + Pointable> {
1049     data: usize,
1050     _marker: PhantomData<Box<T>>,
1051 }
1052 
1053 impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> {
1054     #[inline]
1055     fn into_usize(self) -> usize {
1056         let data = self.data;
1057         mem::forget(self);
1058         data
1059     }
1060 
1061     /// Returns a new pointer pointing to the tagged pointer `data`.
1062     ///
1063     /// # Panics
1064     ///
1065     /// Panics if the data is zero in debug mode.
1066     #[inline]
1067     unsafe fn from_usize(data: usize) -> Self {
1068         debug_assert!(data != 0, "converting zero into `Owned`");
1069         Owned {
1070             data,
1071             _marker: PhantomData,
1072         }
1073     }
1074 }
1075 
1076 impl<T> Owned<T> {
1077     /// Returns a new owned pointer pointing to `raw`.
1078     ///
1079     /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
1080     /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
1081     /// the same raw pointer.
1082     ///
1083     /// # Panics
1084     ///
1085     /// Panics if `raw` is not properly aligned.
1086     ///
1087     /// # Safety
1088     ///
1089     /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted
1090     /// back by `Owned::from_raw()` multiple times.
1091     ///
1092     /// # Examples
1093     ///
1094     /// ```
1095     /// use crossbeam_epoch::Owned;
1096     ///
1097     /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
1098     /// ```
1099     pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
1100         let raw = raw as usize;
1101         ensure_aligned::<T>(raw);
1102         Self::from_usize(raw)
1103     }
1104 
1105     /// Converts the owned pointer into a `Box`.
1106     ///
1107     /// # Examples
1108     ///
1109     /// ```
1110     /// use crossbeam_epoch::Owned;
1111     ///
1112     /// let o = Owned::new(1234);
1113     /// let b: Box<i32> = o.into_box();
1114     /// assert_eq!(*b, 1234);
1115     /// ```
1116     pub fn into_box(self) -> Box<T> {
1117         let (raw, _) = decompose_tag::<T>(self.data);
1118         mem::forget(self);
1119         unsafe { Box::from_raw(raw as *mut _) }
1120     }
1121 
1122     /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
1123     ///
1124     /// # Examples
1125     ///
1126     /// ```
1127     /// use crossbeam_epoch::Owned;
1128     ///
1129     /// let o = Owned::new(1234);
1130     /// ```
1131     pub fn new(init: T) -> Owned<T> {
1132         Self::init(init)
1133     }
1134 }
1135 
1136 impl<T: ?Sized + Pointable> Owned<T> {
1137     /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
1138     ///
1139     /// # Examples
1140     ///
1141     /// ```
1142     /// use crossbeam_epoch::Owned;
1143     ///
1144     /// let o = Owned::<i32>::init(1234);
1145     /// ```
1146     pub fn init(init: T::Init) -> Owned<T> {
1147         unsafe { Self::from_usize(T::init(init)) }
1148     }
1149 
1150     /// Converts the owned pointer into a [`Shared`].
1151     ///
1152     /// # Examples
1153     ///
1154     /// ```
1155     /// use crossbeam_epoch::{self as epoch, Owned};
1156     ///
1157     /// let o = Owned::new(1234);
1158     /// let guard = &epoch::pin();
1159     /// let p = o.into_shared(guard);
1160     /// # unsafe { drop(p.into_owned()); } // avoid leak
1161     /// ```
1162     #[allow(clippy::needless_lifetimes)]
1163     pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
1164         unsafe { Shared::from_usize(self.into_usize()) }
1165     }
1166 
1167     /// Returns the tag stored within the pointer.
1168     ///
1169     /// # Examples
1170     ///
1171     /// ```
1172     /// use crossbeam_epoch::Owned;
1173     ///
1174     /// assert_eq!(Owned::new(1234).tag(), 0);
1175     /// ```
1176     pub fn tag(&self) -> usize {
1177         let (_, tag) = decompose_tag::<T>(self.data);
1178         tag
1179     }
1180 
1181     /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
1182     /// unused bits of the pointer to `T`.
1183     ///
1184     /// # Examples
1185     ///
1186     /// ```
1187     /// use crossbeam_epoch::Owned;
1188     ///
1189     /// let o = Owned::new(0u64);
1190     /// assert_eq!(o.tag(), 0);
1191     /// let o = o.with_tag(2);
1192     /// assert_eq!(o.tag(), 2);
1193     /// ```
1194     pub fn with_tag(self, tag: usize) -> Owned<T> {
1195         let data = self.into_usize();
1196         unsafe { Self::from_usize(compose_tag::<T>(data, tag)) }
1197     }
1198 }
1199 
1200 impl<T: ?Sized + Pointable> Drop for Owned<T> {
1201     fn drop(&mut self) {
1202         let (raw, _) = decompose_tag::<T>(self.data);
1203         unsafe {
1204             T::drop(raw);
1205         }
1206     }
1207 }
1208 
1209 impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> {
1210     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1211         let (raw, tag) = decompose_tag::<T>(self.data);
1212 
1213         f.debug_struct("Owned")
1214             .field("raw", &raw)
1215             .field("tag", &tag)
1216             .finish()
1217     }
1218 }
1219 
1220 impl<T: Clone> Clone for Owned<T> {
1221     fn clone(&self) -> Self {
1222         Owned::new((**self).clone()).with_tag(self.tag())
1223     }
1224 }
1225 
1226 impl<T: ?Sized + Pointable> Deref for Owned<T> {
1227     type Target = T;
1228 
1229     fn deref(&self) -> &T {
1230         let (raw, _) = decompose_tag::<T>(self.data);
1231         unsafe { T::deref(raw) }
1232     }
1233 }
1234 
1235 impl<T: ?Sized + Pointable> DerefMut for Owned<T> {
1236     fn deref_mut(&mut self) -> &mut T {
1237         let (raw, _) = decompose_tag::<T>(self.data);
1238         unsafe { T::deref_mut(raw) }
1239     }
1240 }
1241 
1242 impl<T> From<T> for Owned<T> {
1243     fn from(t: T) -> Self {
1244         Owned::new(t)
1245     }
1246 }
1247 
1248 impl<T> From<Box<T>> for Owned<T> {
1249     /// Returns a new owned pointer pointing to `b`.
1250     ///
1251     /// # Panics
1252     ///
1253     /// Panics if the pointer (the `Box`) is not properly aligned.
1254     ///
1255     /// # Examples
1256     ///
1257     /// ```
1258     /// use crossbeam_epoch::Owned;
1259     ///
1260     /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
1261     /// ```
1262     fn from(b: Box<T>) -> Self {
1263         unsafe { Self::from_raw(Box::into_raw(b)) }
1264     }
1265 }
1266 
1267 impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> {
1268     fn borrow(&self) -> &T {
1269         self.deref()
1270     }
1271 }
1272 
1273 impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> {
1274     fn borrow_mut(&mut self) -> &mut T {
1275         self.deref_mut()
1276     }
1277 }
1278 
1279 impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> {
1280     fn as_ref(&self) -> &T {
1281         self.deref()
1282     }
1283 }
1284 
1285 impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> {
1286     fn as_mut(&mut self) -> &mut T {
1287         self.deref_mut()
1288     }
1289 }
1290 
1291 /// A pointer to an object protected by the epoch GC.
1292 ///
1293 /// The pointer is valid for use only during the lifetime `'g`.
1294 ///
1295 /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
1296 /// least significant bits of the address.
1297 pub struct Shared<'g, T: 'g + ?Sized + Pointable> {
1298     data: usize,
1299     _marker: PhantomData<(&'g (), *const T)>,
1300 }
1301 
1302 impl<T: ?Sized + Pointable> Clone for Shared<'_, T> {
1303     fn clone(&self) -> Self {
1304         *self
1305     }
1306 }
1307 
1308 impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {}
1309 
1310 impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> {
1311     #[inline]
1312     fn into_usize(self) -> usize {
1313         self.data
1314     }
1315 
1316     #[inline]
1317     unsafe fn from_usize(data: usize) -> Self {
1318         Shared {
1319             data,
1320             _marker: PhantomData,
1321         }
1322     }
1323 }
1324 
1325 impl<'g, T> Shared<'g, T> {
1326     /// Converts the pointer to a raw pointer (without the tag).
1327     ///
1328     /// # Examples
1329     ///
1330     /// ```
1331     /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1332     /// use std::sync::atomic::Ordering::SeqCst;
1333     ///
1334     /// let o = Owned::new(1234);
1335     /// let raw = &*o as *const _;
1336     /// let a = Atomic::from(o);
1337     ///
1338     /// let guard = &epoch::pin();
1339     /// let p = a.load(SeqCst, guard);
1340     /// assert_eq!(p.as_raw(), raw);
1341     /// # unsafe { drop(a.into_owned()); } // avoid leak
1342     /// ```
1343     pub fn as_raw(&self) -> *const T {
1344         let (raw, _) = decompose_tag::<T>(self.data);
1345         raw as *const _
1346     }
1347 }
1348 
1349 impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
1350     /// Returns a new null pointer.
1351     ///
1352     /// # Examples
1353     ///
1354     /// ```
1355     /// use crossbeam_epoch::Shared;
1356     ///
1357     /// let p = Shared::<i32>::null();
1358     /// assert!(p.is_null());
1359     /// ```
1360     pub fn null() -> Shared<'g, T> {
1361         Shared {
1362             data: 0,
1363             _marker: PhantomData,
1364         }
1365     }
1366 
1367     /// Returns `true` if the pointer is null.
1368     ///
1369     /// # Examples
1370     ///
1371     /// ```
1372     /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1373     /// use std::sync::atomic::Ordering::SeqCst;
1374     ///
1375     /// let a = Atomic::null();
1376     /// let guard = &epoch::pin();
1377     /// assert!(a.load(SeqCst, guard).is_null());
1378     /// a.store(Owned::new(1234), SeqCst);
1379     /// assert!(!a.load(SeqCst, guard).is_null());
1380     /// # unsafe { drop(a.into_owned()); } // avoid leak
1381     /// ```
1382     pub fn is_null(&self) -> bool {
1383         let (raw, _) = decompose_tag::<T>(self.data);
1384         raw == 0
1385     }
1386 
1387     /// Dereferences the pointer.
1388     ///
1389     /// Returns a reference to the pointee that is valid during the lifetime `'g`.
1390     ///
1391     /// # Safety
1392     ///
1393     /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
1394     ///
1395     /// Another concern is the possibility of data races due to lack of proper synchronization.
1396     /// For example, consider the following scenario:
1397     ///
1398     /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
1399     /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
1400     ///
1401     /// The problem is that relaxed orderings don't synchronize initialization of the object with
1402     /// the read from the second thread. This is a data race. A possible solution would be to use
1403     /// `Release` and `Acquire` orderings.
1404     ///
1405     /// # Examples
1406     ///
1407     /// ```
1408     /// use crossbeam_epoch::{self as epoch, Atomic};
1409     /// use std::sync::atomic::Ordering::SeqCst;
1410     ///
1411     /// let a = Atomic::new(1234);
1412     /// let guard = &epoch::pin();
1413     /// let p = a.load(SeqCst, guard);
1414     /// unsafe {
1415     ///     assert_eq!(p.deref(), &1234);
1416     /// }
1417     /// # unsafe { drop(a.into_owned()); } // avoid leak
1418     /// ```
1419     pub unsafe fn deref(&self) -> &'g T {
1420         let (raw, _) = decompose_tag::<T>(self.data);
1421         T::deref(raw)
1422     }
1423 
1424     /// Dereferences the pointer.
1425     ///
1426     /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
1427     ///
1428     /// # Safety
1429     ///
1430     /// * There is no guarantee that there are no more threads attempting to read/write from/to the
1431     ///   actual object at the same time.
1432     ///
1433     ///   The user must know that there are no concurrent accesses towards the object itself.
1434     ///
1435     /// * Other than the above, all safety concerns of `deref()` apply here.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(vec![1, 2, 3, 4]);
    /// let guard = &epoch::pin();
    ///
    /// let mut p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert!(!p.is_null());
    ///     let b = p.deref_mut();
    ///     assert_eq!(b, &vec![1, 2, 3, 4]);
    ///     b.push(5);
    ///     assert_eq!(b, &vec![1, 2, 3, 4, 5]);
    /// }
    ///
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub unsafe fn deref_mut(&mut self) -> &'g mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref_mut(raw)
    }

    /// Converts the pointer to a reference.
    ///
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.as_ref(), Some(&1234));
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
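    ///
    /// A null pointer simply yields `None` (an illustrative sketch of that case):
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// // No object was ever stored, so there is nothing to dereference.
    /// let p = Shared::<i32>::null();
    /// unsafe {
    ///     assert_eq!(p.as_ref(), None);
    /// }
    /// ```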
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        if raw == 0 {
            None
        } else {
            Some(T::deref(raw))
        }
    }

    /// Takes ownership of the pointee.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     drop(p.into_owned());
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
        Owned::from_usize(self.data)
    }

    /// Takes ownership of the pointee if it is not null.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object, or if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     if let Some(x) = p.try_into_owned() {
    ///         drop(x);
    ///     }
    /// }
    /// ```
    pub unsafe fn try_into_owned(self) -> Option<Owned<T>> {
        if self.is_null() {
            None
        } else {
            Some(Owned::from_usize(self.data))
        }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.tag(), 2);
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
    /// let p2 = p1.with_tag(2);
    ///
    /// assert_eq!(p1.tag(), 0);
    /// assert_eq!(p2.tag(), 2);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
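    ///
    /// Tags that do not fit are silently truncated; for a pointee with alignment 8, such as
    /// `u64` on most platforms, only the three low bits are kept (a sketch of the truncation):
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// // 0b1001 is truncated to the bits that fit in the pointer's unused low bits.
    /// assert_eq!(p.with_tag(0b1001).tag(), 0b0001);
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```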
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
        unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
    }
}

impl<T> From<*const T> for Shared<'_, T> {
    /// Returns a new pointer pointing to `raw`.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
    /// assert!(!p.is_null());
    /// # unsafe { drop(p.into_owned()); } // avoid leak
    /// ```
    fn from(raw: *const T) -> Self {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        unsafe { Self::from_usize(raw) }
    }
}

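// The comparison traits below operate on the composed `data` word, so two `Shared`s compare
// equal only if both the pointer address and the tag match; ordering likewise includes the
// tag bits.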
impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data
    }
}

impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {}

impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.data.partial_cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> Ord for Shared<'_, T> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.data.cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Shared")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Default for Shared<'_, T> {
    fn default() -> Self {
        Shared::null()
    }
}

#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
    use super::{Owned, Shared};
    use std::mem::MaybeUninit;

    #[test]
    fn valid_tag_i8() {
        Shared::<i8>::null().with_tag(0);
    }

    #[test]
    fn valid_tag_i64() {
        Shared::<i64>::null().with_tag(7);
    }

    #[test]
    fn const_atomic_null() {
        use super::Atomic;
        static _U: Atomic<u8> = Atomic::<u8>::null();
    }

    #[test]
    fn array_init() {
        let owned = Owned::<[MaybeUninit<usize>]>::init(10);
        let arr: &[MaybeUninit<usize>] = &owned;
        assert_eq!(arr.len(), 10);
    }
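
    // An extra sketch of the tag semantics: `i64` is at least 2-byte aligned, so a one-bit
    // tag round-trips through `with_tag`/`tag`, and `Shared` equality includes the tag bits.
    #[test]
    fn tag_roundtrip_and_eq() {
        let p = Shared::<i64>::null();
        assert_eq!(p.with_tag(1).tag(), 1);
        // Same (null) address, different tags: the pointers are not equal.
        assert_ne!(p.with_tag(0), p.with_tag(1));
    }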
}