// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

#[cfg(all(feature = "hardware-lock-elision", any(target_arch = "x86", target_arch = "x86_64")))]
use std::arch::asm;
use std::sync::atomic::AtomicUsize;

// Extension trait to add lock elision primitives to atomic types
pub trait AtomicElisionExt {
    type IntType;

    // Perform a compare_exchange and start a transaction
    fn elision_compare_exchange_acquire(
        &self,
        current: Self::IntType,
        new: Self::IntType,
    ) -> Result<Self::IntType, Self::IntType>;

    // Perform a fetch_sub and end a transaction
    fn elision_fetch_sub_release(&self, val: Self::IntType) -> Self::IntType;
}

// Indicates whether the target architecture supports lock elision
#[inline]
pub fn have_elision() -> bool {
    cfg!(all(
        feature = "hardware-lock-elision",
        any(target_arch = "x86", target_arch = "x86_64"),
    ))
}
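
// A minimal usage sketch: how a lock implementation might combine
// have_elision() with the primitives above. The helper names and the 0/1
// state encoding are illustrative assumptions only, not parking_lot's actual
// lock layout.
#[allow(dead_code)]
fn try_elided_read_acquire(state: &AtomicUsize) -> bool {
    // Only attempt elision when the target supports it; otherwise the
    // fallback impl below would hit unreachable!().
    have_elision() && state.elision_compare_exchange_acquire(0, 1).is_ok()
}

#[allow(dead_code)]
fn elided_read_release(state: &AtomicUsize) {
    // Only meaningful after a successful try_elided_read_acquire: undo the
    // reader increment, ending the elided region.
    state.elision_fetch_sub_release(1);
}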

// This implementation is never actually called because it is guarded by
// have_elision().
#[cfg(not(all(feature = "hardware-lock-elision", any(target_arch = "x86", target_arch = "x86_64"))))]
impl AtomicElisionExt for AtomicUsize {
    type IntType = usize;

    #[inline]
    fn elision_compare_exchange_acquire(&self, _: usize, _: usize) -> Result<usize, usize> {
        unreachable!();
    }

    #[inline]
    fn elision_fetch_sub_release(&self, _: usize) -> usize {
        unreachable!();
    }
}

#[cfg(all(feature = "hardware-lock-elision", any(target_arch = "x86", target_arch = "x86_64")))]
impl AtomicElisionExt for AtomicUsize {
    type IntType = usize;

    #[inline]
    fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
        unsafe {
            let prev: usize;
            // XACQUIRE together with the LOCK prefix starts a hardware lock
            // elision (HLE) transaction; on CPUs without HLE support the
            // prefix is ignored and this is a plain locked cmpxchg.
            #[cfg(target_pointer_width = "32")]
            asm!(
                "xacquire",
                "lock",
                "cmpxchg [{:e}], {:e}",
                in(reg) self,
                in(reg) new,
                inout("eax") current => prev,
            );
            #[cfg(target_pointer_width = "64")]
            asm!(
                "xacquire",
                "lock",
                "cmpxchg [{}], {}",
                in(reg) self,
                in(reg) new,
                inout("rax") current => prev,
            );
            if prev == current {
                Ok(prev)
            } else {
                Err(prev)
            }
        }
    }

    #[inline]
    fn elision_fetch_sub_release(&self, val: usize) -> usize {
        unsafe {
            let prev: usize;
            // XRELEASE together with the LOCK prefix ends the elided region;
            // adding the two's-complement negation of `val` implements
            // fetch_sub on top of xadd.
            #[cfg(target_pointer_width = "32")]
            asm!(
                "xrelease",
                "lock",
                "xadd [{:e}], {:e}",
                in(reg) self,
                inout(reg) val.wrapping_neg() => prev,
            );
            #[cfg(target_pointer_width = "64")]
            asm!(
                "xrelease",
                "lock",
                "xadd [{}], {}",
                in(reg) self,
                inout(reg) val.wrapping_neg() => prev,
            );
            prev
        }
    }
}
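
// A small sanity-check sketch; the test names below are illustrative
// additions. The round-trip test assumes the usual cmpxchg/xadd semantics
// and only runs when have_elision() reports support.
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::Ordering;

    #[test]
    fn elision_gate_matches_build_configuration() {
        // have_elision() must mirror the cfg used to select the asm-based impl.
        let expected = cfg!(all(
            feature = "hardware-lock-elision",
            any(target_arch = "x86", target_arch = "x86_64"),
        ));
        assert_eq!(have_elision(), expected);
    }

    #[test]
    fn elided_acquire_release_round_trip() {
        if !have_elision() {
            // The fallback impl is unreachable by design; skip on such targets.
            return;
        }
        let state = AtomicUsize::new(0);
        // Acquire: 0 -> 1 via the xacquire-prefixed cmpxchg.
        assert_eq!(state.elision_compare_exchange_acquire(0, 1), Ok(0));
        // Release: subtract 1 via the xrelease-prefixed xadd, which returns
        // the previous value.
        assert_eq!(state.elision_fetch_sub_release(1), 1);
        assert_eq!(state.load(Ordering::Relaxed), 0);
    }
}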