1*7c3d14c8STreehugger Robot //===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
2*7c3d14c8STreehugger Robot //
3*7c3d14c8STreehugger Robot // The LLVM Compiler Infrastructure
4*7c3d14c8STreehugger Robot //
5*7c3d14c8STreehugger Robot // This file is distributed under the University of Illinois Open Source
6*7c3d14c8STreehugger Robot // License. See LICENSE.TXT for details.
7*7c3d14c8STreehugger Robot //
8*7c3d14c8STreehugger Robot //===----------------------------------------------------------------------===//
9*7c3d14c8STreehugger Robot //
10*7c3d14c8STreehugger Robot // This file is a part of ThreadSanitizer (TSan), a race detector.
11*7c3d14c8STreehugger Robot //
12*7c3d14c8STreehugger Robot // Main internal TSan header file.
13*7c3d14c8STreehugger Robot //
14*7c3d14c8STreehugger Robot // Ground rules:
15*7c3d14c8STreehugger Robot // - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
16*7c3d14c8STreehugger Robot // function-scope locals)
17*7c3d14c8STreehugger Robot // - All functions/classes/etc reside in namespace __tsan, except for those
18*7c3d14c8STreehugger Robot // declared in tsan_interface.h.
19*7c3d14c8STreehugger Robot // - Platform-specific files should be used instead of ifdefs (*).
20*7c3d14c8STreehugger Robot // - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
22*7c3d14c8STreehugger Robot //
23*7c3d14c8STreehugger Robot // (*) Except when inlining is critical for performance.
24*7c3d14c8STreehugger Robot //===----------------------------------------------------------------------===//
25*7c3d14c8STreehugger Robot
26*7c3d14c8STreehugger Robot #ifndef TSAN_RTL_H
27*7c3d14c8STreehugger Robot #define TSAN_RTL_H
28*7c3d14c8STreehugger Robot
29*7c3d14c8STreehugger Robot #include "sanitizer_common/sanitizer_allocator.h"
30*7c3d14c8STreehugger Robot #include "sanitizer_common/sanitizer_allocator_internal.h"
31*7c3d14c8STreehugger Robot #include "sanitizer_common/sanitizer_asm.h"
32*7c3d14c8STreehugger Robot #include "sanitizer_common/sanitizer_common.h"
33*7c3d14c8STreehugger Robot #include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
34*7c3d14c8STreehugger Robot #include "sanitizer_common/sanitizer_libignore.h"
35*7c3d14c8STreehugger Robot #include "sanitizer_common/sanitizer_suppressions.h"
36*7c3d14c8STreehugger Robot #include "sanitizer_common/sanitizer_thread_registry.h"
37*7c3d14c8STreehugger Robot #include "tsan_clock.h"
38*7c3d14c8STreehugger Robot #include "tsan_defs.h"
39*7c3d14c8STreehugger Robot #include "tsan_flags.h"
40*7c3d14c8STreehugger Robot #include "tsan_sync.h"
41*7c3d14c8STreehugger Robot #include "tsan_trace.h"
42*7c3d14c8STreehugger Robot #include "tsan_vector.h"
43*7c3d14c8STreehugger Robot #include "tsan_report.h"
44*7c3d14c8STreehugger Robot #include "tsan_platform.h"
45*7c3d14c8STreehugger Robot #include "tsan_mutexset.h"
46*7c3d14c8STreehugger Robot #include "tsan_ignoreset.h"
47*7c3d14c8STreehugger Robot #include "tsan_stack_trace.h"
48*7c3d14c8STreehugger Robot
49*7c3d14c8STreehugger Robot #if SANITIZER_WORDSIZE != 64
50*7c3d14c8STreehugger Robot # error "ThreadSanitizer is supported only on 64-bit platforms"
51*7c3d14c8STreehugger Robot #endif
52*7c3d14c8STreehugger Robot
53*7c3d14c8STreehugger Robot namespace __tsan {
54*7c3d14c8STreehugger Robot
#ifndef SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
// Targets with smaller/variable virtual address spaces (mips64, aarch64,
// powerpc) cannot reserve the huge fixed region the 64-bit allocator needs,
// so they use the 32-bit-style size-class allocator covering the whole
// mmap-able range.
static const uptr kAllocatorSpace = 0;
static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kAllocatorRegionSizeLog = 20;  // 1 MB regions.
static const uptr kAllocatorNumRegions =
    kAllocatorSize >> kAllocatorRegionSizeLog;
// Two-level byte map: maps each 1 MB region to its size class.
typedef TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12,
    MapUnmapCallback> ByteMap;
typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
    CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
    MapUnmapCallback> PrimaryAllocator;
#else
// Other 64-bit targets: the primary allocator owns the fixed heap range
// defined by the platform shadow mapping (tsan_platform.h).
typedef SizeClassAllocator64<Mapping::kHeapMemBeg,
    Mapping::kHeapMemEnd - Mapping::kHeapMemBeg, 0,
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
#endif
// Per-thread (per-Processor) cache for the primary allocator.
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
// Fallback for allocations too large for size classes.
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
// Returns the global user-memory allocator instance.
Allocator *allocator();
#endif
79*7c3d14c8STreehugger Robot
80*7c3d14c8STreehugger Robot void TsanCheckFailed(const char *file, int line, const char *cond,
81*7c3d14c8STreehugger Robot u64 v1, u64 v2);
82*7c3d14c8STreehugger Robot
83*7c3d14c8STreehugger Robot const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
84*7c3d14c8STreehugger Robot
85*7c3d14c8STreehugger Robot // FastState (from most significant bit):
86*7c3d14c8STreehugger Robot // ignore : 1
87*7c3d14c8STreehugger Robot // tid : kTidBits
88*7c3d14c8STreehugger Robot // unused : -
89*7c3d14c8STreehugger Robot // history_size : 3
90*7c3d14c8STreehugger Robot // epoch : kClkBits
// Packed per-thread state, updated on every memory access.
// See the bit-layout comment above; the same 64-bit word is also the base
// of the Shadow representation below, so the field offsets must stay in sync.
class FastState {
 public:
  // Constructs state for thread `tid` at clock `epoch`; ignore bit is clear.
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  // Reconstructs state from a previously obtained raw() value.
  explicit FastState(u64 x)
      : x_(x) {
  }

  // Raw 64-bit representation, suitable for storing in shadow memory.
  u64 raw() const {
    return x_;
  }

  // Thread id with the ignore bit masked out.
  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  // Thread id including the ignore bit (one op cheaper; used where the
  // ignore/freed bit is deliberately folded into the comparison).
  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  // Scalar clock value (the low kClkBits bits).
  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  // The epoch occupies the least significant bits, so a plain increment of
  // the whole word advances it; debug check guards against overflow into
  // the history bits.
  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  // The ignore bit is the sign bit, which makes GetIgnoreBit() a single
  // signed comparison on the fast path.
  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  // Stores the history-size selector hs (0..7) into the 3 history bits.
  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }

  // Position in the thread trace that corresponds to the current epoch:
  // the epoch modulo the trace size selected by the history-size bits.
  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  // Note: kIgnoreBit and kFreedBit intentionally share the top bit;
  // FastState interprets it as "ignore", Shadow as "freed" (see the
  // freed-bit comment in Shadow below).
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};
167*7c3d14c8STreehugger Robot
168*7c3d14c8STreehugger Robot // Shadow (from most significant bit):
169*7c3d14c8STreehugger Robot // freed : 1
170*7c3d14c8STreehugger Robot // tid : kTidBits
171*7c3d14c8STreehugger Robot // is_atomic : 1
172*7c3d14c8STreehugger Robot // is_read : 1
173*7c3d14c8STreehugger Robot // size_log : 2
174*7c3d14c8STreehugger Robot // addr0 : 3
175*7c3d14c8STreehugger Robot // epoch : kClkBits
// A shadow-memory cell describing one recorded memory access.
// Extends FastState's word with access address offset (addr0), access size
// log, is_read/is_atomic flags and the freed bit; see the bit-layout comment
// above. All predicates are branch-free bit tests for the hot access path.
class Shadow : public FastState {
 public:
  // Reconstructs a shadow cell from its raw 64-bit value.
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  // Builds a fresh shadow cell from the thread's fast state (tid + epoch).
  // The history bits overlap the access-description bits, so they must be
  // cleared before SetAddr0AndSizeLog() fills them in.
  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  // Records the access offset within the 8-byte granule (addr0, 0..7) and
  // log2 of the access size (0..3). The 5 target bits must still be zero.
  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  // Marks the access as a read when kAccessIsWrite is false (writes are the
  // default, bit clear). Must not be called twice.
  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  // Marks the access as atomic. Must not be called twice.
  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  // An all-zero cell means "empty slot" in shadow memory.
  bool IsZero() const {
    return x_ == 0;
  }

  // Compares tids (including the ignore/freed bit) with a single xor+shift.
  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  // Compares the 5 addr0+size_log bits of both cells in one operation.
  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  // Branch-light overlap test between the byte range of s1 and the range
  // [s2.addr0, s2.addr0 + kS2AccessSize); verified against the slow version
  // in debug builds.
  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1) > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise inaccessible) we write to the
  // shadow values with tid/epoch related to the free and the freed bit set.
  // During memory accesses processing the freed bit is considered
  // as msb of tid. So any access races with shadow with freed bit set
  // (it is as if write from a thread with which we never synchronized before).
  // This allows us to detect accesses to freed memory w/o additional
  // overheads in memory access processing and at the same time restore
  // tid/epoch of free.
  void MarkAsFreed() {
     x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  // Returns whether the freed bit was set and clears it.
  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  // True when this access and the (kIsWrite, kIsAtomic) access cannot race:
  // both are reads, or both are atomic. Single masked test over the two
  // adjacent flag bits; verified against the readable form in debug builds.
  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  // Treats the (is_read, is_atomic) pair as a 2-bit "weakness" rank and
  // checks this access is not weaker than the (kIsWrite, kIsAtomic) one.
  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  // Converse of IsRWNotWeaker(): this access is weaker than or equal to
  // the (kIsWrite, kIsAtomic) one.
  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  // Reference implementation of the range-overlap test, used only in
  // DCHECKs to validate TwoRangesIntersect().
  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
316*7c3d14c8STreehugger Robot
317*7c3d14c8STreehugger Robot struct ThreadSignalContext;
318*7c3d14c8STreehugger Robot
// Thread state snapshot taken around setjmp/longjmp so the runtime can
// restore its own bookkeeping when a longjmp skips normal function exits.
// NOTE(review): field semantics below are inferred from names/types only --
// confirm against the setjmp/longjmp interceptors.
struct JmpBuf {
  uptr sp;                 // stack pointer at setjmp
  uptr mangled_sp;         // presumably the libc pointer-mangled sp from jmp_buf
  int int_signal_send;     // presumably pending-signal bookkeeping -- confirm
  bool in_blocking_func;   // whether the thread was inside a blocking call
  uptr in_signal_handler;  // signal-handler nesting/state at setjmp -- confirm
  uptr *shadow_stack_pos;  // shadow stack position to restore on longjmp
};
327*7c3d14c8STreehugger Robot
328*7c3d14c8STreehugger Robot // A Processor represents a physical thread, or a P for Go.
329*7c3d14c8STreehugger Robot // It is used to store internal resources like allocate cache, and does not
330*7c3d14c8STreehugger Robot // participate in race-detection logic (invisible to end user).
331*7c3d14c8STreehugger Robot // In C++ it is tied to an OS thread just like ThreadState, however ideally
332*7c3d14c8STreehugger Robot // it should be tied to a CPU (this way we will have fewer allocator caches).
333*7c3d14c8STreehugger Robot // In Go it is tied to a P, so there are significantly fewer Processor's than
334*7c3d14c8STreehugger Robot // ThreadState's (which are tied to Gs).
335*7c3d14c8STreehugger Robot // A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr;  // currently wired thread, or nullptr
#ifndef SANITIZER_GO
  AllocatorCache alloc_cache;                   // user allocator cache
  InternalAllocatorCache internal_alloc_cache;  // runtime-internal allocator cache
#endif
  // Caches for the dense slab allocators (heap blocks, sync objects, clocks).
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;
  DDPhysicalThread *dd_pt;  // deadlock-detector per-physical-thread state
};
347*7c3d14c8STreehugger Robot
348*7c3d14c8STreehugger Robot #ifndef SANITIZER_GO
349*7c3d14c8STreehugger Robot // ScopedGlobalProcessor temporary setups a global processor for the current
350*7c3d14c8STreehugger Robot // thread, if it does not have one. Intended for interceptors that can run
351*7c3d14c8STreehugger Robot // at the very thread end, when we already destroyed the thread processor.
// RAII scope; see the comment above. The constructor installs the global
// processor for the current thread if it has none, the destructor removes it.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
356*7c3d14c8STreehugger Robot #endif
357*7c3d14c8STreehugger Robot
358*7c3d14c8STreehugger Robot // This struct is stored in TLS.
// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows to reduce number of shadow state updates.
  // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
  // if we are processing write to X from the same thread at epoch=200,
  // we do nothing, because both writes happen in the same 'synch epoch'.
  // That is, if another memory access does not race with the former write,
  // it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is a 44-bit, 32 are taken by synch_epoch and 12 are
  // taken by epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  // Go does not support ignores.
#ifndef SANITIZER_GO
  IgnoreSet mop_ignore_set;   // stacks of active memory-op ignores
  IgnoreSet sync_ignore_set;  // stacks of active sync ignores
#endif
  // C/C++ uses fixed size shadow stack embed into Trace.
  // Go uses malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;   // shadow address involved in the pending race
  u64 racy_state[2];       // the two racing shadow values being reported
  MutexSet mset;           // set of mutexes currently held by the thread
  ThreadClock clock;       // the thread's vector clock
#ifndef SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;  // active setjmp contexts (see JmpBuf)
  int ignore_interceptors;  // >0: interceptors pass through to real functions
#endif
#if TSAN_COLLECT_STATS
  u64 stat[StatCnt];  // per-thread statistics counters
#endif
  const int tid;        // short reusable thread id
  const int unique_id;  // never-reused thread id
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_inited;
  bool is_dead;
  bool is_freeing;      // set while processing a free() (for report typing)
  bool is_vptr_access;  // set while processing a vptr update/read
  // Thread stack and TLS boundaries, fixed at thread start.
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;  // back-link to the registry context of this thread

#if SANITIZER_DEBUG && !SANITIZER_GO
  InternalDeadlockDetector internal_deadlock_detector;
#endif
  DDLogicalThread *dd_lt;  // deadlock-detector per-logical-thread state

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#ifndef SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;  // signal-handler nesting depth
  ThreadSignalContext *signal_ctx;

#ifndef SANITIZER_GO
  // Stack and clock of the last pthread_cond_timedwait-style sleep,
  // presumably used to annotate "as if synchronized via sleep" reports --
  // confirm in tsan_rtl_report.cc.
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;  // report being built, or nullptr

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};
445*7c3d14c8STreehugger Robot
#ifndef SANITIZER_GO
#if SANITIZER_MAC || SANITIZER_ANDROID
// On Mac/Android the current-thread state cannot live in a plain TLS
// variable, so these are defined out-of-line in platform-specific files.
ThreadState *cur_thread();
void cur_thread_finalize();
#else
// ThreadState lives directly in a raw TLS byte buffer: no C++ static CTORs
// (see the ground rules at the top of this file), and initial-exec TLS model
// keeps the access a single load.
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
// Returns the calling thread's ThreadState.
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
// Nothing to tear down when the state is a plain TLS buffer.
INLINE void cur_thread_finalize() { }
#endif  // SANITIZER_MAC || SANITIZER_ANDROID
#endif  // SANITIZER_GO
459*7c3d14c8STreehugger Robot
// Per-thread entry in the thread registry; outlives the thread itself so
// finished threads can still be described in reports.
class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;        // live thread state, or nullptr after finish
  u32 creation_stack_id;   // stack of the pthread_create/go statement
  SyncClock sync;          // clock for synchronizing with this thread's events
  // Epoch at which the thread had started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};
482*7c3d14c8STreehugger Robot
483*7c3d14c8STreehugger Robot struct RacyStacks {
484*7c3d14c8STreehugger Robot MD5Hash hash[2];
485*7c3d14c8STreehugger Robot bool operator==(const RacyStacks &other) const {
486*7c3d14c8STreehugger Robot if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
487*7c3d14c8STreehugger Robot return true;
488*7c3d14c8STreehugger Robot if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
489*7c3d14c8STreehugger Robot return true;
490*7c3d14c8STreehugger Robot return false;
491*7c3d14c8STreehugger Robot }
492*7c3d14c8STreehugger Robot };
493*7c3d14c8STreehugger Robot
// Address range of a reported race; presumably used to suppress duplicate
// reports on the same range (see Context::racy_addresses) -- confirm in
// tsan_rtl_report.cc.
struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};
498*7c3d14c8STreehugger Robot
// Record of a suppression that already matched a report (see
// Context::fired_suppressions).
struct FiredSuppression {
  ReportType type;    // type of the suppressed report
  uptr pc_or_addr;    // pc or address the suppression matched on
  Suppression *supp;  // the matched suppression entry
};
504*7c3d14c8STreehugger Robot
// The global runtime state; a single instance is pointed to by `ctx` below.
struct Context {
  Context();

  bool initialized;
  bool after_multithreaded_fork;  // fork() happened after threads were created

  MetaMap metamap;  // maps app memory to heap block / sync object metadata

  Mutex report_mtx;  // serializes report production
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  // Deduplication state for already-reported races.
  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // Number of fired suppressions may be large enough.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;  // pluggable deadlock detector

  ClockAlloc clock_alloc;  // slab allocator backing vector clocks

  Flags flags;  // runtime flags (TSAN_OPTIONS)

  // Global statistics and internal-allocation accounting.
  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};
539*7c3d14c8STreehugger Robot
540*7c3d14c8STreehugger Robot extern Context *ctx; // The one and the only global runtime context.
541*7c3d14c8STreehugger Robot
// RAII guard that makes interceptors pass straight through to the real
// functions for the current thread while the object is alive
// (see ThreadState::ignore_interceptors; counter-based, so scopes nest).
// Go has no interceptors, hence the no-op bodies there.
struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#ifndef SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#ifndef SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
555*7c3d14c8STreehugger Robot
// Accumulates the pieces of a single report (memory accesses, stacks,
// threads, mutexes, locations) and owns the resulting ReportDesc.
// Interceptors are ignored for the lifetime of the object because
// building the report invokes the symbolizer.
class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  // |suppressable| marks parts of the report that may match suppressions.
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  // Not copyable (declared private and left undefined).
  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};
586*7c3d14c8STreehugger Robot
// Reconstructs the stack trace and mutex set of thread |tid| as of trace
// position |epoch| into |stk| and |mset|.
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset);
589*7c3d14c8STreehugger Robot
590*7c3d14c8STreehugger Robot template<typename StackTraceTy>
ObtainCurrentStack(ThreadState * thr,uptr toppc,StackTraceTy * stack)591*7c3d14c8STreehugger Robot void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
592*7c3d14c8STreehugger Robot uptr size = thr->shadow_stack_pos - thr->shadow_stack;
593*7c3d14c8STreehugger Robot uptr start = 0;
594*7c3d14c8STreehugger Robot if (size + !!toppc > kStackTraceMax) {
595*7c3d14c8STreehugger Robot start = size + !!toppc - kStackTraceMax;
596*7c3d14c8STreehugger Robot size = kStackTraceMax - !!toppc;
597*7c3d14c8STreehugger Robot }
598*7c3d14c8STreehugger Robot stack->Init(&thr->shadow_stack[start], size, toppc);
599*7c3d14c8STreehugger Robot }
600*7c3d14c8STreehugger Robot
601*7c3d14c8STreehugger Robot
#if TSAN_COLLECT_STATS
// Adds the per-thread statistics in |src| into |dst| (StatCnt entries each).
void StatAggregate(u64 *dst, u64 *src);
// Prints the aggregated statistics array |stat|.
void StatOutput(u64 *stat);
#endif
606*7c3d14c8STreehugger Robot
// Increments statistic |typ| by |n| for thread |thr|.
// Compiles to a no-op when stats collection is disabled.
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] += n;
#endif
}
// Sets statistic |typ| to |n| for thread |thr| (overwrites, does not add).
// Compiles to a no-op when stats collection is disabled.
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] = n;
#endif
}
617*7c3d14c8STreehugger Robot
// Shadow memory mapping.
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
// Runtime initialization steps (shadow, interceptors, ignores, annotations).
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

// fork() support hooks, called around the fork in parent/child.
void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

// Report production and suppression/expectation matching.
void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
635*7c3d14c8STreehugger Robot
// Debug printing: DPrintf is active at TSAN_DEBUG_OUTPUT >= 1,
// DPrintf2 at >= 2; both expand to nothing otherwise.
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif
647*7c3d14c8STreehugger Robot
// Stack trace capture and symbolization helpers.
u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

// Runtime start-up and shutdown.
void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

// Allocator hooks for user malloc/free.
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
658*7c3d14c8STreehugger Robot
// Core memory access processing. kAccessSizeLog is log2 of the access
// size in bytes (see the kSizeLog* constants below).
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

// log2 of the access size, passed as kAccessSizeLog above.
const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;
675*7c3d14c8STreehugger Robot
// Plain (non-atomic) read of size 1 << kAccessSizeLog at |addr|.
void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}
680*7c3d14c8STreehugger Robot
// Plain (non-atomic) write of size 1 << kAccessSizeLog at |addr|.
void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}
685*7c3d14c8STreehugger Robot
// Atomic read of size 1 << kAccessSizeLog at |addr|.
void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}
690*7c3d14c8STreehugger Robot
// Atomic write of size 1 << kAccessSizeLog at |addr|.
void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
695*7c3d14c8STreehugger Robot
// Shadow range manipulation for allocation, free and memset-like events.
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

// Scoped suppression of race reporting (Begin/End must be paired; nests).
void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
// Scoped suppression of synchronization tracking (Begin/End must be paired).
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

// Shadow (call) stack maintenance on function entry/exit.
void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

// Thread lifecycle events.
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

// Processor lifecycle and attachment to threads.
Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);

// Mutex events.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
    bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
735*7c3d14c8STreehugger Robot
// Happens-before synchronization events on the sync object at |addr|.
void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of happens-before relation, it draws a HB edge from all threads
// (where they happen to execute right now) to the current thread. We use it to
// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
// right before executing finalizers. This provides a coarse, but simple
// approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
// Low-level variants operating directly on a SyncClock.
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
751*7c3d14c8STreehugger Robot
// The hacky call uses custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use normal call.
#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
// Fallback for platforms/configs without the assembly thunk: plain call.
#define HACKY_CALL(f) f()
#endif
772*7c3d14c8STreehugger Robot
// Event trace management.
void TraceSwitch(ThreadState *thr);  // advances to the next trace part
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

// Assembly thunk target used via HACKY_CALL to enter trace switching.
extern "C" void __tsan_trace_switch();
TraceAddEvent(ThreadState * thr,FastState fs,EventType typ,u64 addr)780*7c3d14c8STreehugger Robot void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
781*7c3d14c8STreehugger Robot EventType typ, u64 addr) {
782*7c3d14c8STreehugger Robot if (!kCollectHistory)
783*7c3d14c8STreehugger Robot return;
784*7c3d14c8STreehugger Robot DCHECK_GE((int)typ, 0);
785*7c3d14c8STreehugger Robot DCHECK_LE((int)typ, 7);
786*7c3d14c8STreehugger Robot DCHECK_EQ(GetLsb(addr, 61), addr);
787*7c3d14c8STreehugger Robot StatInc(thr, StatEvents);
788*7c3d14c8STreehugger Robot u64 pos = fs.GetTracePos();
789*7c3d14c8STreehugger Robot if (UNLIKELY((pos % kTracePartSize) == 0)) {
790*7c3d14c8STreehugger Robot #ifndef SANITIZER_GO
791*7c3d14c8STreehugger Robot HACKY_CALL(__tsan_trace_switch);
792*7c3d14c8STreehugger Robot #else
793*7c3d14c8STreehugger Robot TraceSwitch(thr);
794*7c3d14c8STreehugger Robot #endif
795*7c3d14c8STreehugger Robot }
796*7c3d14c8STreehugger Robot Event *trace = (Event*)GetThreadTrace(fs.tid());
797*7c3d14c8STreehugger Robot Event *evp = &trace[pos];
798*7c3d14c8STreehugger Robot Event ev = (u64)addr | ((u64)typ << 61);
799*7c3d14c8STreehugger Robot *evp = ev;
800*7c3d14c8STreehugger Robot }
801*7c3d14c8STreehugger Robot
#ifndef SANITIZER_GO
// Upper bound of the heap region, including the primary allocator's
// additional (metadata) space beyond HeapMemEnd().
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif
807*7c3d14c8STreehugger Robot
808*7c3d14c8STreehugger Robot } // namespace __tsan
809*7c3d14c8STreehugger Robot
810*7c3d14c8STreehugger Robot #endif // TSAN_RTL_H
811