// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/time/clock.h"

#include "absl/base/attributes.h"
#include "absl/base/optimization.h"

#ifdef _WIN32
#include <windows.h>
#endif

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstdint>
#include <ctime>
#include <limits>

#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
Time Now() {
  // TODO(bww): Get a timespec instead so we don't have to divide.
  int64_t n = absl::GetCurrentTimeNanos();
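  // Duration stores its fractional seconds in ticks of 1/4 nanosecond, so the
  // nanosecond remainder below is multiplied by 4 to convert it to ticks.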
  if (n >= 0) {
    return time_internal::FromUnixDuration(
        time_internal::MakeDuration(n / 1000000000, n % 1000000000 * 4));
  }
  return time_internal::FromUnixDuration(absl::Nanoseconds(n));
}
ABSL_NAMESPACE_END
}  // namespace absl

// Decide whether to use the fast cyclecounter-based GetCurrentTimeNanos()
// algorithm, or to get the time directly from the OS on every call.
// By default, the fast algorithm based on the cyclecount is disabled because
// in certain situations, for example, if the OS enters a "sleep" mode, it may
// produce incorrect values immediately upon waking.
// This can be chosen at compile-time via
// -DABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS=[0|1]
#ifndef ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 0
#endif
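// For example, with a Bazel build (assuming the platform's cycle counter is
// trustworthy), one way to enable the fast path is:
//   bazel build --copt=-DABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS=1 ...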

#if defined(__APPLE__) || defined(_WIN32)
#include "absl/time/internal/get_current_time_chrono.inc"
#else
#include "absl/time/internal/get_current_time_posix.inc"
#endif

// Allows override by test.
#ifndef GET_CURRENT_TIME_NANOS_FROM_SYSTEM
#define GET_CURRENT_TIME_NANOS_FROM_SYSTEM() \
  ::absl::time_internal::GetCurrentTimeNanosFromSystem()
#endif

#if !ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
namespace absl {
ABSL_NAMESPACE_BEGIN
int64_t GetCurrentTimeNanos() { return GET_CURRENT_TIME_NANOS_FROM_SYSTEM(); }
ABSL_NAMESPACE_END
}  // namespace absl
#else  // Use the cyclecounter-based implementation below.

// Allows override by test.
#ifndef GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW
#define GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW() \
  ::absl::time_internal::UnscaledCycleClockWrapperForGetCurrentTime::Now()
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {

// On some processors, consecutive reads of the cycle counter may yield the
// same value (weakly-increasing).  In x86-64's debug mode, clear the least
// significant bits to discourage depending on a strictly-increasing Now()
// value.
#if !defined(NDEBUG) && defined(__x86_64__)
constexpr int64_t kCycleClockNowMask = ~int64_t{0xff};
#else
constexpr int64_t kCycleClockNowMask = ~int64_t{0};
#endif

// This is a friend wrapper around UnscaledCycleClock::Now()
// (needed to access UnscaledCycleClock).
class UnscaledCycleClockWrapperForGetCurrentTime {
 public:
  static int64_t Now() {
    return base_internal::UnscaledCycleClock::Now() & kCycleClockNowMask;
  }
};
}  // namespace time_internal

// uint64_t is used in this module to provide an extra bit in multiplications

// ---------------------------------------------------------------------
// An implementation of reader-writer locks that use no atomic ops in the read
// case.  This is a generalization of Lamport's method for reading a multiword
// clock.  Increment a word on each write acquisition, using the low-order bit
// as a spinlock; the word is the high word of the "clock".  Readers read the
// high word, then all other data, then the high word again, and repeat the
// read if the reads of the high word yield different answers, or an odd
// value (either case suggests possible interference from a writer).
// Here we use a spinlock to ensure only one writer at a time, rather than
// spinning on the bottom bit of the word to benefit from SpinLock
// spin-delay tuning.

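// In outline, a reader of data guarded by this scheme follows the pattern
// below; this is a sketch of the protocol, and GetCurrentTimeNanos() further
// down is the real reader:
//
//   uint64_t s0 = seq.load(std::memory_order_acquire);
//   ... relaxed loads of the shared data ...
//   std::atomic_thread_fence(std::memory_order_acquire);
//   uint64_t s1 = seq.load(std::memory_order_relaxed);
//   if (s0 == s1 && (s0 & 1) == 0) { /* the data read is consistent */ }
//   else { /* a writer interfered; retry or fall back */ }
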
// Acquire seqlock (*seq) and return the value to be written to unlock.
static inline uint64_t SeqAcquire(std::atomic<uint64_t> *seq) {
  uint64_t x = seq->fetch_add(1, std::memory_order_relaxed);

  // We put a release fence between update to *seq and writes to shared data.
  // Thus all stores to shared data are effectively release operations and
  // update to *seq above cannot be re-ordered past any of them.  Note that
  // this barrier is not for the fetch_add above.  A release barrier for the
  // fetch_add would be before it, not after.
  std::atomic_thread_fence(std::memory_order_release);

  return x + 2;   // original word plus 2
}

// Release seqlock (*seq) by writing x to it---a value previously returned by
// SeqAcquire.
static inline void SeqRelease(std::atomic<uint64_t> *seq, uint64_t x) {
  // The unlock store to *seq must have release ordering so that all
  // updates to shared data must finish before this store.
  seq->store(x, std::memory_order_release);  // release lock for readers
}
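
// A writer's usage, in sketch form (UpdateLastSample() below is the real
// writer):
//
//   uint64_t lock_value = SeqAcquire(&seq);  // *seq is now odd; readers that
//                                            // observe it fall back to the
//                                            // slow path
//   ... stores to the shared data ...
//   SeqRelease(&seq, lock_value);            // *seq becomes even again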

// ---------------------------------------------------------------------

// "nsscaled" is a unit of time equal to a (2**kScale)th of a nanosecond.
enum { kScale = 30 };

// The minimum interval between samples of the time base.
// We pick enough time to amortize the cost of the sample,
// to get a reasonably accurate cycle counter rate reading,
// and not so much that calculations will overflow 64 bits.
static const uint64_t kMinNSBetweenSamples = 2000 << 20;
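// (2000 << 20 is 2,097,152,000 ns, so consecutive samples are at least roughly
// 2.1 seconds apart.)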

// We require that kMinNSBetweenSamples shifted by kScale
// have at least a bit left over for 64-bit calculations.
static_assert(((kMinNSBetweenSamples << (kScale + 1)) >> (kScale + 1)) ==
                  kMinNSBetweenSamples,
              "cannot represent kMinNSBetweenSamples in scaled units");

// data from a sample of the kernel's time value
struct TimeSampleAtomic {
  std::atomic<uint64_t> raw_ns{0};              // raw kernel time
  std::atomic<uint64_t> base_ns{0};             // our estimate of time
  std::atomic<uint64_t> base_cycles{0};         // cycle counter reading
  std::atomic<uint64_t> nsscaled_per_cycle{0};  // cycle period
  // cycles before we'll sample again (a scaled reciprocal of the period,
  // to avoid a division on the fast path).
  std::atomic<uint64_t> min_cycles_per_sample{0};
};
// Same again, but with non-atomic types
struct TimeSample {
  uint64_t raw_ns = 0;                 // raw kernel time
  uint64_t base_ns = 0;                // our estimate of time
  uint64_t base_cycles = 0;            // cycle counter reading
  uint64_t nsscaled_per_cycle = 0;     // cycle period
  uint64_t min_cycles_per_sample = 0;  // approx cycles before next sample
};

struct ABSL_CACHELINE_ALIGNED TimeState {
  std::atomic<uint64_t> seq{0};
  TimeSampleAtomic last_sample;  // the last sample; under seq

  // The following counters are used only by the test code.
  int64_t stats_initializations{0};
  int64_t stats_reinitializations{0};
  int64_t stats_calibrations{0};
  int64_t stats_slow_paths{0};
  int64_t stats_fast_slow_paths{0};

  uint64_t last_now_cycles ABSL_GUARDED_BY(lock){0};

  // Used by GetCurrentTimeNanosFromKernel().
  // We try to read clock values at about the same time as the kernel clock.
  // This value gets adjusted up or down as an estimate of how long that should
  // take, so we can reject attempts that take unusually long.
  std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};
  // Number of times in a row we've seen a kernel time call take substantially
  // less than approx_syscall_time_in_cycles.
  std::atomic<uint32_t> kernel_time_seen_smaller{0};

  // A reader-writer lock protecting the static locations below.
  // See SeqAcquire() and SeqRelease() above.
  absl::base_internal::SpinLock lock{absl::kConstInit,
                                     base_internal::SCHEDULE_KERNEL_ONLY};
};
ABSL_CONST_INIT static TimeState time_state;

// Return the time in ns as told by the kernel interface.  Place in *cycleclock
// the value of the cycleclock at about the time of the syscall.
// This call represents the time base that this module synchronizes to.
// Ensures that *cycleclock does not step back from last_cycleclock by a small
// amount (up to 1 << 16), to discard small backward counter steps.  (Larger
// steps are assumed to be complete resyncs, which shouldn't happen.  If they
// do, a full reinitialization of the outer algorithm should occur.)
static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
                                             uint64_t *cycleclock)
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
  uint64_t local_approx_syscall_time_in_cycles =  // local copy
      time_state.approx_syscall_time_in_cycles.load(std::memory_order_relaxed);

  int64_t current_time_nanos_from_system;
  uint64_t before_cycles;
  uint64_t after_cycles;
  uint64_t elapsed_cycles;
  int loops = 0;
  do {
    before_cycles =
        static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
    current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
    after_cycles =
        static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
    // elapsed_cycles is unsigned, so is large on overflow
    elapsed_cycles = after_cycles - before_cycles;
    if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
        ++loops == 20) {  // clock changed frequencies?  Back off.
      loops = 0;
      if (local_approx_syscall_time_in_cycles < 1000 * 1000) {
        local_approx_syscall_time_in_cycles =
            (local_approx_syscall_time_in_cycles + 1) << 1;
      }
      time_state.approx_syscall_time_in_cycles.store(
          local_approx_syscall_time_in_cycles, std::memory_order_relaxed);
    }
  } while (elapsed_cycles >= local_approx_syscall_time_in_cycles ||
           last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16));

  // Adjust approx_syscall_time_in_cycles to be within a factor of 2
  // of the typical time to execute one iteration of the loop above.
  if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) {
    // measured time is no smaller than half current approximation
    time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
  } else if (time_state.kernel_time_seen_smaller.fetch_add(
                 1, std::memory_order_relaxed) >= 3) {
    // smaller delays several times in a row; reduce approximation by 12.5%
    const uint64_t new_approximation =
        local_approx_syscall_time_in_cycles -
        (local_approx_syscall_time_in_cycles >> 3);
    time_state.approx_syscall_time_in_cycles.store(new_approximation,
                                                   std::memory_order_relaxed);
    time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
  }

  *cycleclock = after_cycles;
  return current_time_nanos_from_system;
}
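
// Taken together, the adjustments above keep approx_syscall_time_in_cycles
// within about a factor of 2 of the typical cost of one loop iteration: it
// roughly doubles after 20 consecutive over-threshold iterations, and shrinks
// by 12.5% once readings have come in substantially under it 4 times in a row.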

static int64_t GetCurrentTimeNanosSlowPath() ABSL_ATTRIBUTE_COLD;

// Read the contents of *atomic into *sample.
// Each field is read atomically, but to maintain atomicity between fields,
// the access must be done under a lock.
static void ReadTimeSampleAtomic(const struct TimeSampleAtomic *atomic,
                                 struct TimeSample *sample) {
  sample->base_ns = atomic->base_ns.load(std::memory_order_relaxed);
  sample->base_cycles = atomic->base_cycles.load(std::memory_order_relaxed);
  sample->nsscaled_per_cycle =
      atomic->nsscaled_per_cycle.load(std::memory_order_relaxed);
  sample->min_cycles_per_sample =
      atomic->min_cycles_per_sample.load(std::memory_order_relaxed);
  sample->raw_ns = atomic->raw_ns.load(std::memory_order_relaxed);
}

// Public routine.
// Algorithm:  We wish to compute real time from a cycle counter.  In normal
// operation, we construct a piecewise linear approximation to the kernel time
// source, using the cycle counter value.  The start of each line segment is at
// the same point as the end of the last, but may have a different slope (that
// is, a different idea of the cycle counter frequency).  Every couple of
// seconds, the kernel time source is sampled and compared with the current
// approximation.  A new slope is chosen that, if followed for another couple
// of seconds, will correct the error at the current position.  The information
// for a sample is in the "last_sample" struct.  The linear approximation is
//   estimated_time = last_sample.base_ns +
//     last_sample.ns_per_cycle * (counter_reading - last_sample.base_cycles)
// (ns_per_cycle is actually stored in different units and scaled, to avoid
// overflow).  The base_ns of the next linear approximation is the
// estimated_time using the last approximation; the base_cycles is the cycle
// counter value at that time; the ns_per_cycle is the number of ns per cycle
// measured since the last sample, but adjusted so that most of the difference
// between the estimated_time and the kernel time will be corrected by the
// estimated time to the next sample.  In normal operation, this algorithm
// relies on:
// - the cycle counter and kernel time rates not changing a lot in a few
//   seconds.
// - the client calling into the code often, compared to the couple-of-seconds
//   sampling interval, so the time to the next correction can be estimated.
// Any time ns_per_cycle is not known, a major error is detected, or the
// assumption about frequent calls is violated, the implementation returns the
// kernel time.  It records sufficient data that a linear approximation can
// resume a little later.
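//
// As a worked example (with illustrative, not measured, numbers): on a 3GHz
// cycle counter, nsscaled_per_cycle is about (1 << kScale) / 3, i.e. roughly
// 357,913,941.  One second later, delta_cycles is about 3,000,000,000, and
// the fast path computes (3,000,000,000 * 357,913,941) >> kScale, which is
// approximately 1,000,000,000 ns, as expected.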

int64_t GetCurrentTimeNanos() {
  // read the data from the "last_sample" struct (but don't need raw_ns yet)
  // The reads of "seq" and test of the values emulate a reader lock.
  uint64_t base_ns;
  uint64_t base_cycles;
  uint64_t nsscaled_per_cycle;
  uint64_t min_cycles_per_sample;
  uint64_t seq_read0;
  uint64_t seq_read1;

  // If we have enough information to interpolate, the value returned will be
  // derived from this cycleclock-derived time estimate.  On some platforms
  // (POWER) the function to retrieve this value has enough complexity to
  // contribute to register pressure - reading it early before initializing
  // the other pieces of the calculation minimizes spill/restore instructions,
  // minimizing icache cost.
  uint64_t now_cycles =
      static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());

  // Acquire pairs with the barrier in SeqRelease - if this load sees that
  // store, the shared-data reads necessarily see that SeqRelease's updates
  // to the same shared data.
  seq_read0 = time_state.seq.load(std::memory_order_acquire);

  base_ns = time_state.last_sample.base_ns.load(std::memory_order_relaxed);
  base_cycles =
      time_state.last_sample.base_cycles.load(std::memory_order_relaxed);
  nsscaled_per_cycle =
      time_state.last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
  min_cycles_per_sample = time_state.last_sample.min_cycles_per_sample.load(
      std::memory_order_relaxed);

  // This acquire fence pairs with the release fence in SeqAcquire.  Since it
  // is sequenced between reads of shared data and seq_read1, the reads of
  // shared data are effectively acquiring.
  std::atomic_thread_fence(std::memory_order_acquire);

  // The shared-data reads are effectively acquire ordered, and the
  // shared-data writes are effectively release ordered. Therefore if our
  // shared-data reads see any of a particular update's shared-data writes,
  // seq_read1 is guaranteed to see that update's SeqAcquire.
  seq_read1 = time_state.seq.load(std::memory_order_relaxed);

  // Fast path.  Return if min_cycles_per_sample has not yet elapsed since the
  // last sample, and we read a consistent sample.  The fast path activates
  // only when min_cycles_per_sample is non-zero, which happens when we get an
  // estimate for the cycle time.  The predicate will fail if now_cycles <
  // base_cycles, or if some other thread is in the slow path.
  //
  // Since we now read now_cycles before base_ns, it is possible for now_cycles
  // to be less than base_cycles (if we were interrupted between those loads and
  // last_sample was updated). This is harmless, because delta_cycles will wrap
  // and report a time much bigger than min_cycles_per_sample. In that case we
  // will take the slow path.
  uint64_t delta_cycles;
  if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
      (delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) {
    return static_cast<int64_t>(
        base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale));
  }
  return GetCurrentTimeNanosSlowPath();
}

// Return (a << kScale)/b.
// Zero is returned if b==0.   Scaling is performed internally to
// preserve precision without overflow.
static uint64_t SafeDivideAndScale(uint64_t a, uint64_t b) {
  // Find maximum safe_shift so that
  //  0 <= safe_shift <= kScale  and  (a << safe_shift) does not overflow.
  int safe_shift = kScale;
  while (((a << safe_shift) >> safe_shift) != a) {
    safe_shift--;
  }
  uint64_t scaled_b = b >> (kScale - safe_shift);
  uint64_t quotient = 0;
  if (scaled_b != 0) {
    quotient = (a << safe_shift) / scaled_b;
  }
  return quotient;
}
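
// For instance (an illustrative calculation, not part of the library): a
// clock that ticked 1,500,000,000 times in 500,000,000 ns has a period of
// 1/3 ns per cycle, and SafeDivideAndScale(500000000, 1500000000) yields
// (500000000 << 30) / 1500000000 = 357913941, that period in nsscaled units.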

static uint64_t UpdateLastSample(
    uint64_t now_cycles, uint64_t now_ns, uint64_t delta_cycles,
    const struct TimeSample *sample) ABSL_ATTRIBUTE_COLD;

// The slow path of GetCurrentTimeNanos().  This is taken while gathering
// initial samples, when enough time has elapsed since the last sample, and
// when any other thread is writing to last_sample.
//
// Manually mark this 'noinline' to minimize stack frame size of the fast
// path.  Without this, sometimes a compiler may inline this big block of code
// into the fast path.  That causes lots of register spills and reloads that
// are unnecessary unless the slow path is taken.
//
// TODO(absl-team): Remove this attribute when our compiler is smart enough
// to do the right thing.
ABSL_ATTRIBUTE_NOINLINE
static int64_t GetCurrentTimeNanosSlowPath()
    ABSL_LOCKS_EXCLUDED(time_state.lock) {
  // Serialize access to slow-path.  Fast-path readers are not blocked yet, and
  // code below must not modify last_sample until the seqlock is acquired.
  time_state.lock.Lock();

  // Sample the kernel time base.  This is the definition of
  // "now" if we take the slow path.
  uint64_t now_cycles;
  uint64_t now_ns = static_cast<uint64_t>(
      GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles));
  time_state.last_now_cycles = now_cycles;

  uint64_t estimated_base_ns;

  // ----------
  // Read the "last_sample" values again; this time holding the write lock.
  struct TimeSample sample;
  ReadTimeSampleAtomic(&time_state.last_sample, &sample);

  // ----------
  // Try running the fast path again; another thread may have updated the
  // sample between our run of the fast path and the sample we just read.
  uint64_t delta_cycles = now_cycles - sample.base_cycles;
  if (delta_cycles < sample.min_cycles_per_sample) {
    // Another thread updated the sample.  This path does not take the seqlock
    // so that blocked readers can make progress without blocking new readers.
    estimated_base_ns = sample.base_ns +
        ((delta_cycles * sample.nsscaled_per_cycle) >> kScale);
    time_state.stats_fast_slow_paths++;
  } else {
    estimated_base_ns =
        UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample);
  }

  time_state.lock.Unlock();

  return static_cast<int64_t>(estimated_base_ns);
}

// Main part of the algorithm.  Locks out readers, updates the approximation
// using the new sample from the kernel, and stores the result in last_sample
// for readers.  Returns the new estimated time.
static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
                                 uint64_t delta_cycles,
                                 const struct TimeSample *sample)
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
  uint64_t estimated_base_ns = now_ns;
  uint64_t lock_value =
      SeqAcquire(&time_state.seq);  // acquire seqlock to block readers

  // The 5s in the next if-statement limits the time for which we will trust
  // the cycle counter and our last sample to give a reasonable result.
  // Errors in the rate of the source clock can be multiplied by the ratio
  // between this limit and kMinNSBetweenSamples.
  if (sample->raw_ns == 0 ||  // no recent sample, or clock went backwards
      sample->raw_ns + static_cast<uint64_t>(5) * 1000 * 1000 * 1000 < now_ns ||
      now_ns < sample->raw_ns || now_cycles < sample->base_cycles) {
    // record this sample, and forget any previously known slope.
    time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
    time_state.last_sample.base_ns.store(estimated_base_ns,
                                         std::memory_order_relaxed);
    time_state.last_sample.base_cycles.store(now_cycles,
                                             std::memory_order_relaxed);
    time_state.last_sample.nsscaled_per_cycle.store(0,
                                                    std::memory_order_relaxed);
    time_state.last_sample.min_cycles_per_sample.store(
        0, std::memory_order_relaxed);
    time_state.stats_initializations++;
  } else if (sample->raw_ns + 500 * 1000 * 1000 < now_ns &&
             sample->base_cycles + 50 < now_cycles) {
    // Enough time has passed to compute the cycle time.
    if (sample->nsscaled_per_cycle != 0) {  // Have a cycle time estimate.
      // Compute time from counter reading, but avoiding overflow;
      // delta_cycles may be larger than on the fast path.
      uint64_t estimated_scaled_ns;
      int s = -1;
      do {
        s++;
        estimated_scaled_ns = (delta_cycles >> s) * sample->nsscaled_per_cycle;
      } while (estimated_scaled_ns / sample->nsscaled_per_cycle !=
               (delta_cycles >> s));
      estimated_base_ns = sample->base_ns +
                          (estimated_scaled_ns >> (kScale - s));
    }

    // Compute the assumed cycle time kMinNSBetweenSamples ns into the future
    // assuming the cycle counter rate stays the same as the last interval.
    uint64_t ns = now_ns - sample->raw_ns;
    uint64_t measured_nsscaled_per_cycle = SafeDivideAndScale(ns, delta_cycles);

    uint64_t assumed_next_sample_delta_cycles =
        SafeDivideAndScale(kMinNSBetweenSamples, measured_nsscaled_per_cycle);

    // Estimate low by this much.
    int64_t diff_ns = static_cast<int64_t>(now_ns - estimated_base_ns);

    // We want to set nsscaled_per_cycle so that our estimate of the ns time
    // at the assumed cycle time is the assumed ns time.
    // That is, we want to set nsscaled_per_cycle so:
    //  kMinNSBetweenSamples + diff_ns  ==
    //  (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
    // But we wish to damp oscillations, so instead correct only most
    // of our current error, by solving:
    //  kMinNSBetweenSamples + diff_ns - (diff_ns / 16) ==
    //  (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
    ns = static_cast<uint64_t>(static_cast<int64_t>(kMinNSBetweenSamples) +
                               diff_ns - (diff_ns / 16));
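    // (For example: if the estimate trails the kernel clock by
    // diff_ns = 16000 ns, the slope below is chosen to close 15000 ns of that
    // gap by the time the next sample is due.)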
    uint64_t new_nsscaled_per_cycle =
        SafeDivideAndScale(ns, assumed_next_sample_delta_cycles);
    if (new_nsscaled_per_cycle != 0 &&
        diff_ns < 100 * 1000 * 1000 && -diff_ns < 100 * 1000 * 1000) {
      // record the cycle time measurement
      time_state.last_sample.nsscaled_per_cycle.store(
          new_nsscaled_per_cycle, std::memory_order_relaxed);
      uint64_t new_min_cycles_per_sample =
          SafeDivideAndScale(kMinNSBetweenSamples, new_nsscaled_per_cycle);
      time_state.last_sample.min_cycles_per_sample.store(
          new_min_cycles_per_sample, std::memory_order_relaxed);
      time_state.stats_calibrations++;
    } else {  // something went wrong; forget the slope
      time_state.last_sample.nsscaled_per_cycle.store(
          0, std::memory_order_relaxed);
      time_state.last_sample.min_cycles_per_sample.store(
          0, std::memory_order_relaxed);
      estimated_base_ns = now_ns;
      time_state.stats_reinitializations++;
    }
    time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
    time_state.last_sample.base_ns.store(estimated_base_ns,
                                         std::memory_order_relaxed);
    time_state.last_sample.base_cycles.store(now_cycles,
                                             std::memory_order_relaxed);
  } else {
    // have a sample, but no slope; waiting for enough time for a calibration
    time_state.stats_slow_paths++;
  }

  SeqRelease(&time_state.seq, lock_value);  // release the readers

  return estimated_base_ns;
}
ABSL_NAMESPACE_END
}  // namespace absl
#endif  // ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {

// Returns the maximum duration that SleepOnce() can sleep for.
constexpr absl::Duration MaxSleep() {
#ifdef _WIN32
  // Windows Sleep() takes an unsigned long argument in milliseconds.
  return absl::Milliseconds(
      std::numeric_limits<unsigned long>::max());  // NOLINT(runtime/int)
#else
  return absl::Seconds(std::numeric_limits<time_t>::max());
#endif
}

// Sleeps for the given duration.
// REQUIRES: to_sleep <= MaxSleep().
void SleepOnce(absl::Duration to_sleep) {
#ifdef _WIN32
  Sleep(static_cast<DWORD>(to_sleep / absl::Milliseconds(1)));
#else
  struct timespec sleep_time = absl::ToTimespec(to_sleep);
  while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
    // Ignore signals and wait for the full interval to elapse.
  }
#endif
}

}  // namespace
ABSL_NAMESPACE_END
}  // namespace absl

extern "C" {

ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(
    absl::Duration duration) {
  while (duration > absl::ZeroDuration()) {
    absl::Duration to_sleep = std::min(duration, absl::MaxSleep());
    absl::SleepOnce(to_sleep);
    duration -= to_sleep;
  }
}

}  // extern "C"
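
// absl::SleepFor() (declared in clock.h) forwards to this weak symbol, so a
// program can substitute its own sleeping strategy by providing a strong
// definition of AbslInternalSleepFor.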