// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/base/internal/spinlock.h"

#include <algorithm>
#include <atomic>
#include <limits>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/atomic_hook.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/spinlock_wait.h"
#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
#include "absl/base/call_once.h"

// Description of lock-word:
//  31..00: [............................3][2][1][0]
//
//     [0]: kSpinLockHeld
//     [1]: kSpinLockCooperative
//     [2]: kSpinLockDisabledScheduling
// [31..3]: ONLY kSpinLockSleeper OR
//          Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
//
// Detailed descriptions:
//
// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
//
// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
//          contended iff kSpinLockCooperative is set.
//
// Bit [2]: This bit is exclusive from bit [1]. It is used only by a
//          non-cooperative lock. When set, indicates that scheduling was
//          successfully disabled when the lock was acquired. May be unset,
//          even if non-cooperative, if a ThreadIdentity did not yet exist at
//          time of acquisition.
//
// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
//          acquired without contention, however, at least one waiter exists.
//
// Otherwise, bits [31..3] represent the time spent by the current lock
// holder to acquire the lock. There may be outstanding waiter(s).
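//
// Worked example (illustrative): a lock word of 0x103 has bit [0] set (the
// lock is held), bit [1] set (cooperative scheduling), and bits [31..3] ==
// 0x100, i.e. a scaled wait of 0x20 units; DecodeWaitCycles() below turns
// that back into 0x100 << 4 == 4096 cycles spent by the holder acquiring
// the lock.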

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook<void (*)(
    const void *lock, int64_t wait_cycles)>
    submit_profile_data;

void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
                                         int64_t wait_cycles)) {
  submit_profile_data.Store(fn);
}
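
// Illustrative usage sketch (hypothetical caller; MyContentionProfiler is
// not part of this file):
//
//   void MyContentionProfiler(const void *lock, int64_t wait_cycles) {
//     // Record that `lock` was contended for `wait_cycles` cycles.
//   }
//   ...
//   absl::base_internal::RegisterSpinLockProfiler(&MyContentionProfiler);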

#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
// Static member variable definitions.
constexpr uint32_t SpinLock::kSpinLockHeld;
constexpr uint32_t SpinLock::kSpinLockCooperative;
constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
constexpr uint32_t SpinLock::kSpinLockSleeper;
constexpr uint32_t SpinLock::kWaitTimeMask;
#endif

// Uncommon constructors.
SpinLock::SpinLock(base_internal::SchedulingMode mode)
    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
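
// Example usage (illustrative): constructing a cooperative lock, i.e. one
// whose waiters may yield to the scheduler when contended:
//
//   absl::base_internal::SpinLock lock(
//       absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);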

// Monitor the lock to see if its value changes within some time period
// (adaptive_spin_count loop iterations). The last value read from the lock
// is returned from the method.
uint32_t SpinLock::SpinLoop() {
  // We are already in the slow path of SpinLock; initialize the
  // adaptive_spin_count here.
  ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
  ABSL_CONST_INIT static int adaptive_spin_count = 0;
  base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
    adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
  });

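  // The relaxed loads below suffice because this loop only monitors the lock
  // word; the CAS that actually acquires the lock (TryLockInternal, declared
  // in spinlock.h) is expected to provide the acquire ordering.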
  int c = adaptive_spin_count;
  uint32_t lock_value;
  do {
    lock_value = lockword_.load(std::memory_order_relaxed);
  } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
  return lock_value;
}

void SpinLock::SlowLock() {
  uint32_t lock_value = SpinLoop();
  lock_value = TryLockInternal(lock_value, 0);
  if ((lock_value & kSpinLockHeld) == 0) {
    return;
  }

  base_internal::SchedulingMode scheduling_mode;
  if ((lock_value & kSpinLockCooperative) != 0) {
    scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  } else {
    scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
  }

  // The lock was not obtained initially, so this thread needs to wait for
  // it. Record the current timestamp in the local variable wait_start_time
  // so the total wait time can be stored in the lockword once this thread
  // obtains the lock.
  int64_t wait_start_time = CycleClock::Now();
  uint32_t wait_cycles = 0;
  int lock_wait_call_count = 0;
  while ((lock_value & kSpinLockHeld) != 0) {
    // If the lock is currently held, but not marked as having a sleeper, mark
    // it as having a sleeper.
    if ((lock_value & kWaitTimeMask) == 0) {
      // Here, just "mark" that the thread is going to sleep. Don't store the
      // lock wait time in the lock -- the lock word stores the amount of time
      // that the current holder waited before acquiring the lock, not the wait
      // time of any thread currently waiting to acquire it.
      if (lockword_.compare_exchange_strong(
              lock_value, lock_value | kSpinLockSleeper,
              std::memory_order_relaxed, std::memory_order_relaxed)) {
        // Successfully transitioned to kSpinLockSleeper. Pass
        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
        // the last lock_value observed.
        lock_value |= kSpinLockSleeper;
      } else if ((lock_value & kSpinLockHeld) == 0) {
        // Lock is free again, so try and acquire it before sleeping. The
        // new lock state will be the number of cycles this thread waited if
        // this thread obtains the lock.
        lock_value = TryLockInternal(lock_value, wait_cycles);
        continue;  // Skip the delay at the end of the loop.
      } else if ((lock_value & kWaitTimeMask) == 0) {
        // The lock is still held, without a waiter being marked, but something
        // else about the lock word changed, causing our CAS to fail. For
        // example, a new lock holder may have acquired the lock with
        // kSpinLockDisabledScheduling set, whereas the previous holder had not
        // set that flag. In this case, attempt again to mark ourselves as a
        // waiter.
        continue;
      }
    }

    // SpinLockDelay() calls into the fiber scheduler; we need to see the
    // synchronization there to avoid false positives.
    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
    // Wait for an OS-specific delay.
    base_internal::SpinLockDelay(&lockword_, lock_value,
                                 ++lock_wait_call_count, scheduling_mode);
    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
    // Spin again after returning from the wait routine to give this thread
    // some chance of obtaining the lock.
    lock_value = SpinLoop();
    wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
    lock_value = TryLockInternal(lock_value, wait_cycles);
  }
}

void SpinLock::SlowUnlock(uint32_t lock_value) {
  base_internal::SpinLockWake(&lockword_,
                              false);  // wake waiter if necessary

  // If our acquisition was contended, collect contentionz profile info. We
  // reserve a unitary wait time to represent that a waiter exists without our
  // own acquisition having been contended.
  if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
    const int64_t wait_cycles = DecodeWaitCycles(lock_value);
    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
    submit_profile_data(this, wait_cycles);
    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
  }
}

// We use the upper 29 bits of the lock word to store the time spent waiting
// to acquire this lock. This is reported by contentionz profiling. Since the
// lower bits of the cycle counter wrap very quickly on high-frequency
// processors, we divide to reduce the granularity to 2^kProfileTimestampShift
// sized units. On a 4 GHz machine this will lose track of wait times greater
// than (2^29/4 GHz)*128 =~ 17.2 seconds. Such waits should be extremely rare.
static constexpr int kProfileTimestampShift = 7;

// We currently reserve the lower 3 bits.
static constexpr int kLockwordReservedShift = 3;

uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
                                    int64_t wait_end_time) {
  static const int64_t kMaxWaitTime =
      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
  int64_t scaled_wait_time =
      (wait_end_time - wait_start_time) >> kProfileTimestampShift;

  // Return a representation of the time spent waiting that can be stored in
  // the lock word's upper bits.
  uint32_t clamped = static_cast<uint32_t>(
      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);

  if (clamped == 0) {
    return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
  }
  // Bump up value if necessary to avoid returning kSpinLockSleeper.
  const uint32_t kMinWaitTime =
      kSpinLockSleeper + (1 << kLockwordReservedShift);
  if (clamped == kSpinLockSleeper) {
    return kMinWaitTime;
  }
  return clamped;
}
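
// Example encodings (illustrative; kSpinLockSleeper == 1 << 3 per the bit
// layout described at the top of this file):
//   wait < 128 cycles:    scaled_wait_time == 0, so kSpinLockSleeper is
//                         returned; waiters get woken, but no contention is
//                         recorded.
//   wait in [128, 256):   clamped == 1 << 3 == kSpinLockSleeper, so it is
//                         bumped to kMinWaitTime to remain distinguishable
//                         from the sleeper-only marker.
//   wait of 4096 cycles:  scaled_wait_time == 32, so 32 << 3 == 0x100 is
//                         returned and stored in bits [31..3].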

int64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
  // Cast to uint32_t first to ensure bits [63:32] are cleared.
  const int64_t scaled_wait_time =
      static_cast<uint32_t>(lock_value & kWaitTimeMask);
  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}
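
// Round-trip example (illustrative): a wait of 1,000,000 cycles encodes as
// (1000000 >> 7) << 3 == 62496; decoding shifts the masked value left by
// 7 - 3 == 4 bits, yielding 999936, i.e. the original wait truncated to a
// multiple of 2^7 == 128 cycles.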

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl