mutex.cc (forked from abseil/abseil-cpp), 2753 lines (2547 loc), 111 KB
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/synchronization/mutex.h"
#ifdef _WIN32
#include <windows.h>
#ifdef ERROR
#undef ERROR
#endif
#else
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <sys/time.h>
#endif
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <algorithm>
#include <atomic>
#include <cinttypes>
#include <thread> // NOLINT(build/c++11)
#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/atomic_hook.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/hide_ptr.h"
#include "absl/base/internal/low_level_alloc.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/port.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"
#include "absl/synchronization/internal/graphcycles.h"
#include "absl/synchronization/internal/per_thread_sem.h"
#include "absl/time/time.h"
using absl::base_internal::CurrentThreadIdentityIfPresent;
using absl::base_internal::PerThreadSynch;
using absl::base_internal::SchedulingGuard;
using absl::base_internal::ThreadIdentity;
using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
using absl::synchronization_internal::GraphCycles;
using absl::synchronization_internal::GraphId;
using absl::synchronization_internal::InvalidGraphId;
using absl::synchronization_internal::KernelTimeout;
using absl::synchronization_internal::PerThreadSem;
extern "C" {
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
std::this_thread::yield();
}
} // extern "C"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
#if defined(ABSL_HAVE_THREAD_SANITIZER)
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
#else
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
#endif
ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
kDeadlockDetectionDefault);
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
submit_profile_data;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
const char *msg, const void *obj, int64_t wait_cycles)>
mutex_tracer;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
cond_var_tracer;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
bool (*)(const void *pc, char *out, int out_size)>
symbolizer(absl::Symbolize);
} // namespace
static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
bool locking, bool trylock,
bool read_lock);
void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
submit_profile_data.Store(fn);
}
void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
int64_t wait_cycles)) {
mutex_tracer.Store(fn);
}
void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
cond_var_tracer.Store(fn);
}
void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
symbolizer.Store(fn);
}
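// Example (illustrative sketch, not part of the upstream file): any of the
// hooks above can be installed at startup, e.g. to record how long threads
// block on mutexes:
//
//   void MyMutexProfiler(int64_t wait_cycles) {  // hypothetical client hook
//     // Convert wait_cycles to time via base_internal::CycleClock and export
//     // the value to the client's own metrics system.
//   }
//   ...
//   absl::RegisterMutexProfiler(&MyMutexProfiler);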
namespace {
// Represents the strategy for spin and yield.
// See the comment in GetMutexGlobals() for more information.
enum DelayMode { AGGRESSIVE, GENTLE };
struct ABSL_CACHELINE_ALIGNED MutexGlobals {
absl::once_flag once;
int spinloop_iterations = 0;
int32_t mutex_sleep_limit[2] = {};
};
const MutexGlobals &GetMutexGlobals() {
ABSL_CONST_INIT static MutexGlobals data;
absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
const int num_cpus = absl::base_internal::NumCPUs();
data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
// If this is a uniprocessor, only yield/sleep. Otherwise, if the mode is
// aggressive then spin many times before yielding. If the mode is
// gentle then spin only a few times before yielding. Aggressive spinning
// is used to ensure that an Unlock() call, which must get the spin lock
// for any thread to make progress, gets it without undue delay.
if (num_cpus > 1) {
data.mutex_sleep_limit[AGGRESSIVE] = 5000;
data.mutex_sleep_limit[GENTLE] = 250;
} else {
data.mutex_sleep_limit[AGGRESSIVE] = 0;
data.mutex_sleep_limit[GENTLE] = 0;
}
});
return data;
}
} // namespace
namespace synchronization_internal {
// Returns the Mutex delay on iteration `c` depending on the given `mode`.
// The returned value should be used as `c` for the next call to `MutexDelay`.
int MutexDelay(int32_t c, int mode) {
const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
if (c < limit) {
// Spin.
c++;
} else {
SchedulingGuard::ScopedEnable enable_rescheduling;
ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
if (c == limit) {
// Yield once.
ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
c++;
} else {
// Then wait.
absl::SleepFor(absl::Microseconds(10));
c = 0;
}
ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
}
return c;
}
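// Worked example (added for illustration): on a multiprocessor with
// mode == GENTLE, the limit is 250, so a caller that feeds the return value
// back in as `c` spins for calls with c < 250, yields once when c == 250,
// sleeps 10us on the next call, and then resets c to 0 so the
// spin/yield/sleep cycle repeats.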
} // namespace synchronization_internal
// --------------------------Generic atomic ops
// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
// "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
// before making any change.
// This is used to set flags in mutex and condition variable words.
static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
intptr_t wait_until_clear) {
intptr_t v;
do {
v = pv->load(std::memory_order_relaxed);
} while ((v & bits) != bits &&
((v & wait_until_clear) != 0 ||
!pv->compare_exchange_weak(v, v | bits,
std::memory_order_release,
std::memory_order_relaxed)));
}
// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
// "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
// before making any change.
// This is used to unset flags in mutex and condition variable words.
static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
intptr_t wait_until_clear) {
intptr_t v;
do {
v = pv->load(std::memory_order_relaxed);
} while ((v & bits) != 0 &&
((v & wait_until_clear) != 0 ||
!pv->compare_exchange_weak(v, v & ~bits,
std::memory_order_release,
std::memory_order_relaxed)));
}
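// Illustrative note: EnsureSynchEvent() below uses these helpers as
// AtomicSetBits(addr, kMuEvent, kMuSpin) when attaching a SynchEvent to a
// Mutex: the CAS loop waits for the kMuSpin lock bit to clear before setting
// kMuEvent, and returns immediately if kMuEvent is already set.
// ForgetSynchEvent() clears the bit the same way via AtomicClearBits().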
//------------------------------------------------------------------
// Data for doing deadlock detection.
ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
// Graph used to detect deadlocks.
ABSL_CONST_INIT static GraphCycles *deadlock_graph
ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
//------------------------------------------------------------------
// An event mechanism for debugging mutex use.
// It also allows mutexes to be given names for those who can't handle
// addresses, and instead like to give their data structures names like
// "Henry", "Fido", or "Rupert IV, King of Yondavia".
namespace { // to prevent name pollution
enum { // Mutex and CondVar events passed as "ev" to PostSynchEvent
// Mutex events
SYNCH_EV_TRYLOCK_SUCCESS,
SYNCH_EV_TRYLOCK_FAILED,
SYNCH_EV_READERTRYLOCK_SUCCESS,
SYNCH_EV_READERTRYLOCK_FAILED,
SYNCH_EV_LOCK,
SYNCH_EV_LOCK_RETURNING,
SYNCH_EV_READERLOCK,
SYNCH_EV_READERLOCK_RETURNING,
SYNCH_EV_UNLOCK,
SYNCH_EV_READERUNLOCK,
// CondVar events
SYNCH_EV_WAIT,
SYNCH_EV_WAIT_RETURNING,
SYNCH_EV_SIGNAL,
SYNCH_EV_SIGNALALL,
};
enum { // Event flags
SYNCH_F_R = 0x01, // reader event
SYNCH_F_LCK = 0x02, // PostSynchEvent called with mutex held
SYNCH_F_TRY = 0x04, // TryLock or ReaderTryLock
SYNCH_F_UNLOCK = 0x08, // Unlock or ReaderUnlock
SYNCH_F_LCK_W = SYNCH_F_LCK,
SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
};
} // anonymous namespace
// Properties of the events.
static const struct {
int flags;
const char *msg;
} event_properties[] = {
{SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
{0, "TryLock failed "},
{SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
{0, "ReaderTryLock failed "},
{0, "Lock blocking "},
{SYNCH_F_LCK_W, "Lock returning "},
{0, "ReaderLock blocking "},
{SYNCH_F_LCK_R, "ReaderLock returning "},
{SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
{SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
{0, "Wait on "},
{0, "Wait unblocked "},
{0, "Signal on "},
{0, "SignalAll on "},
};
ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
// Hash table size; should be prime > 2.
// Can't be too small, as it's used for deadlock detection information.
static constexpr uint32_t kNSynchEvent = 1031;
static struct SynchEvent { // this is a trivial hash table for the events
// struct is freed when refcount reaches 0
int refcount ABSL_GUARDED_BY(synch_event_mu);
// buckets have linear, 0-terminated chains
SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
// Constant after initialization
uintptr_t masked_addr; // object at this address is called "name"
// No explicit synchronization used. Instead we assume that the
// client who enables/disables invariants/logging on a Mutex does so
// while the Mutex is not being concurrently accessed by others.
void (*invariant)(void *arg); // called on each event
void *arg; // first arg to (*invariant)()
bool log; // logging turned on
// Constant after initialization
char name[1]; // actually longer---NUL-terminated string
} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
// Ensure that the object at "addr" has a SynchEvent struct associated with it,
// set "bits" in the word there (waiting until lockbit is clear before doing
// so), and return a refcounted reference that will remain valid until
// UnrefSynchEvent() is called. If a new SynchEvent is allocated,
// the string name is copied into it.
// When used with a mutex, the caller should also ensure that kMuEvent
// is set in the mutex word, and similarly for condition variables and kCVEvent.
static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
const char *name, intptr_t bits,
intptr_t lockbit) {
uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
SynchEvent *e;
// first look for existing SynchEvent struct..
synch_event_mu.Lock();
for (e = synch_event[h];
e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
e = e->next) {
}
if (e == nullptr) { // no SynchEvent struct found; make one.
if (name == nullptr) {
name = "";
}
size_t l = strlen(name);
e = reinterpret_cast<SynchEvent *>(
base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
e->refcount = 2; // one for return value, one for linked list
e->masked_addr = base_internal::HidePtr(addr);
e->invariant = nullptr;
e->arg = nullptr;
e->log = false;
strcpy(e->name, name); // NOLINT(runtime/printf)
e->next = synch_event[h];
AtomicSetBits(addr, bits, lockbit);
synch_event[h] = e;
} else {
e->refcount++; // for return value
}
synch_event_mu.Unlock();
return e;
}
// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
static void DeleteSynchEvent(SynchEvent *e) {
base_internal::LowLevelAlloc::Free(e);
}
// Decrement the reference count of *e, or do nothing if e==null.
static void UnrefSynchEvent(SynchEvent *e) {
if (e != nullptr) {
synch_event_mu.Lock();
bool del = (--(e->refcount) == 0);
synch_event_mu.Unlock();
if (del) {
DeleteSynchEvent(e);
}
}
}
// Forget the mapping from the object (Mutex or CondVar) at address addr
// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
// is clear before doing so).
static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
intptr_t lockbit) {
uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
SynchEvent **pe;
SynchEvent *e;
synch_event_mu.Lock();
for (pe = &synch_event[h];
(e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
pe = &e->next) {
}
bool del = false;
if (e != nullptr) {
*pe = e->next;
del = (--(e->refcount) == 0);
}
AtomicClearBits(addr, bits, lockbit);
synch_event_mu.Unlock();
if (del) {
DeleteSynchEvent(e);
}
}
// Return a refcounted reference to the SynchEvent of the object at address
// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
// called.
static SynchEvent *GetSynchEvent(const void *addr) {
uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
SynchEvent *e;
synch_event_mu.Lock();
for (e = synch_event[h];
e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
e = e->next) {
}
if (e != nullptr) {
e->refcount++;
}
synch_event_mu.Unlock();
return e;
}
// Called when an event "ev" occurs on a Mutex or CondVar "obj",
// if event recording is on.
static void PostSynchEvent(void *obj, int ev) {
SynchEvent *e = GetSynchEvent(obj);
// logging is on if event recording is on and either there's no event struct,
// or it explicitly says to log
if (e == nullptr || e->log) {
void *pcs[40];
int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
// A buffer with enough space for the ASCII for all the PCs, even on a
// 64-bit machine.
char buffer[ABSL_ARRAYSIZE(pcs) * 24];
int pos = snprintf(buffer, sizeof (buffer), " @");
for (int i = 0; i != n; i++) {
pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]);
}
ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
(e == nullptr ? "" : e->name), buffer);
}
const int flags = event_properties[ev].flags;
if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
// Calling the invariant as is causes problems under ThreadSanitizer.
// We are currently inside of Mutex Lock/Unlock and are ignoring all
// memory accesses and synchronization. If the invariant transitively
// synchronizes something else and we ignore the synchronization, we will
// get false positive race reports later.
// Reuse EvalConditionAnnotated to properly call into user code.
struct local {
static bool pred(SynchEvent *ev) {
(*ev->invariant)(ev->arg);
return false;
}
};
Condition cond(&local::pred, e);
Mutex *mu = static_cast<Mutex *>(obj);
const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
const bool trylock = (flags & SYNCH_F_TRY) != 0;
const bool read_lock = (flags & SYNCH_F_R) != 0;
EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
}
UnrefSynchEvent(e);
}
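// Illustrative note: with logging enabled, the ABSL_RAW_LOG(INFO, ...) call
// above produces lines of the form
//   Lock returning 0x7f... MyMutex  @ 0x... 0x... ...
// i.e. the event message, the object's address, its registered name (here the
// hypothetical "MyMutex"), and a stack trace of program counters.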
//------------------------------------------------------------------
// The SynchWaitParams struct encapsulates the way in which a thread is waiting:
// whether it has a timeout, the condition, exclusive/shared, and whether a
// condition variable wait has an associated Mutex (as opposed to another
// type of lock). It also points to the PerThreadSynch struct of its thread.
// cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
//
// This structure is held on the stack rather than directly in
// PerThreadSynch because a thread can be waiting on multiple Mutexes if,
// while waiting on one Mutex, the implementation calls a client callback
// (such as a Condition function) that acquires another Mutex. We don't
// strictly need to allow this, but programmers become confused if we do not
// allow them to use functions such as LOG() within Condition functions. The
// PerThreadSynch struct points at the most recent SynchWaitParams struct when
// the thread is on a Mutex's waiter queue.
struct SynchWaitParams {
SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
KernelTimeout timeout_arg, Mutex *cvmu_arg,
PerThreadSynch *thread_arg,
std::atomic<intptr_t> *cv_word_arg)
: how(how_arg),
cond(cond_arg),
timeout(timeout_arg),
cvmu(cvmu_arg),
thread(thread_arg),
cv_word(cv_word_arg),
contention_start_cycles(base_internal::CycleClock::Now()) {}
const Mutex::MuHow how; // How this thread needs to wait.
const Condition *cond; // The condition that this thread is waiting for.
// In Mutex, this field is set to zero if a timeout
// expires.
KernelTimeout timeout; // timeout expiry---absolute time
// In Mutex, this field is set to zero if a timeout
// expires.
Mutex *const cvmu; // used for transfer from cond var to mutex
PerThreadSynch *const thread; // thread that is waiting
// If not null, thread should be enqueued on the CondVar whose state
// word is cv_word instead of queueing normally on the Mutex.
std::atomic<intptr_t> *cv_word;
int64_t contention_start_cycles; // Time (in cycles) when this thread started
// to contend for the mutex.
};
struct SynchLocksHeld {
int n; // number of valid entries in locks[]
bool overflow; // true iff we overflowed the array at some point
struct {
Mutex *mu; // lock acquired
int32_t count; // times acquired
GraphId id; // deadlock_graph id of acquired lock
} locks[40];
// If a thread overfills the array during deadlock detection, we
// continue, discarding information as needed. If no overflow has
// taken place, we can provide more error checking, such as
// detecting when a thread releases a lock it does not hold.
};
// A sentinel value in lists that is not 0.
// A 0 value is used to mean "not on a list".
static PerThreadSynch *const kPerThreadSynchNull =
reinterpret_cast<PerThreadSynch *>(1);
static SynchLocksHeld *LocksHeldAlloc() {
SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
ret->n = 0;
ret->overflow = false;
return ret;
}
// Return the PerThreadSynch-struct for this thread.
static PerThreadSynch *Synch_GetPerThread() {
ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
return &identity->per_thread_synch;
}
static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
PerThreadSynch *w = Synch_GetPerThread();
if (mu) {
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
}
return w;
}
static SynchLocksHeld *Synch_GetAllLocks() {
PerThreadSynch *s = Synch_GetPerThread();
if (s->all_locks == nullptr) {
s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
}
return s->all_locks;
}
// Post on "w"'s associated PerThreadSem.
void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
PerThreadSem::Post(w->thread_identity());
if (mu) {
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
}
}
// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
assert(w == Synch_GetPerThread());
static_cast<void>(w);
bool res = PerThreadSem::Wait(t);
if (mu) {
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
}
return res;
}
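// Illustrative note: these two functions form the park/unpark pair used by the
// queueing code below: a blocked thread waits on its own PerThreadSem (with an
// optional KernelTimeout), and whichever thread wakes it does so by posting
// that same semaphore.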
// We're in a fatal signal handler that hopes to use Mutex and to get
// lucky by not deadlocking. We try to improve its chances of success
// by effectively disabling some of the consistency checks. This will
// prevent certain ABSL_RAW_CHECK() statements from being triggered when
// re-entry is detected. The ABSL_RAW_CHECK() statements are those in the
// Mutex code checking that the "waitp" field has not been reused.
void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
// Fix the per-thread state only if it exists.
ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
if (identity != nullptr) {
identity->per_thread_synch.suppress_fatal_errors = true;
}
// Don't do deadlock detection when we are already failing.
synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
std::memory_order_release);
}
// --------------------------time support
// Return the current time plus the timeout. Use the same clock as
// PerThreadSem::Wait() for consistency. Unfortunately, we don't have
// such a choice when a deadline is given directly.
static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
#ifndef _WIN32
struct timeval tv;
gettimeofday(&tv, nullptr);
return absl::TimeFromTimeval(tv) + timeout;
#else
return absl::Now() + timeout;
#endif
}
// --------------------------Mutexes
// In the layout below, the msb of the bottom byte is currently unused. Also,
// the following constraints were considered in choosing the layout:
// o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
// 0xcd) are illegal: reader and writer lock both held.
// o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
// bit-twiddling trick in Mutex::Unlock().
// o kMuWriter / kMuReader == kMuWrWait / kMuWait,
// to enable the bit-twiddling trick in CheckForMutexCorruption().
static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
static const intptr_t kMuDesig = 0x0002L; // there's a designated waker
static const intptr_t kMuWait = 0x0004L; // threads are waiting
static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
// INVARIANT1 (tracked by the kMuDesig bit above): there's a thread that was
// blocked on the mutex, is no longer blocked, but has not yet acquired the
// mutex. If there's a designated waker, all threads can avoid taking the
// slow path in unlock because the designated waker will subsequently acquire
// the lock and wake someone. To maintain INVARIANT1 the bit is set when a
// thread is unblocked (INV1a), and threads that were unblocked reset the bit
// when they either acquire or re-block (INV1b).
static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
// for a reader
static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
// Hack to make constant values available to gdb pretty printer
enum {
kGdbMuSpin = kMuSpin,
kGdbMuEvent = kMuEvent,
kGdbMuWait = kMuWait,
kGdbMuWriter = kMuWriter,
kGdbMuDesig = kMuDesig,
kGdbMuWrWait = kMuWrWait,
kGdbMuReader = kMuReader,
kGdbMuLow = kMuLow,
};
// kMuWrWait implies kMuWait.
// kMuReader and kMuWriter are mutually exclusive.
// If kMuReader is zero, there are no readers.
// Otherwise, if kMuWait is zero, the high order bits contain a count of the
// number of readers. Otherwise, the reader count is held in
// PerThreadSynch::readers of the most recently queued waiter, again in the
// bits above kMuLow.
static const intptr_t kMuOne = 0x0100; // a count of one reader
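// Worked example (added for illustration): with kMuWait clear, a mutex word of
// (3 * kMuOne) | kMuReader == 0x0301 means "read-locked by three readers";
// kMuOne is the unit by which that high-order reader count is adjusted.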
// flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
static const int kMuHasBlocked = 0x01; // already blocked (MUST == 1)
static const int kMuIsCond = 0x02; // conditional waiter (CV or Condition)
static_assert(PerThreadSynch::kAlignment > kMuLow,
"PerThreadSynch::kAlignment must be greater than kMuLow");
// This struct contains various bitmasks to be used in
// acquiring and releasing a mutex in a particular mode.
struct MuHowS {
// if all the bits in fast_need_zero are zero, the lock can be acquired by
// adding fast_add and oring fast_or. The bit kMuDesig should be reset iff
// this is the designated waker.
intptr_t fast_need_zero;
intptr_t fast_or;
intptr_t fast_add;
intptr_t slow_need_zero; // fast_need_zero with events (e.g. logging)
intptr_t slow_inc_need_zero; // if all the bits in slow_inc_need_zero are
// zero a reader can acquire a read share by
// setting the reader bit and incrementing
// the reader count (in last waiter since
// we're now slow-path). kMuWrWait may
// be ignored if we already waited once.
};
static const MuHowS kSharedS = {
// shared or read lock
kMuWriter | kMuWait | kMuEvent, // fast_need_zero
kMuReader, // fast_or
kMuOne, // fast_add
kMuWriter | kMuWait, // slow_need_zero
kMuSpin | kMuWriter | kMuWrWait, // slow_inc_need_zero
};
static const MuHowS kExclusiveS = {
// exclusive or write lock
kMuWriter | kMuReader | kMuEvent, // fast_need_zero
kMuWriter, // fast_or
0, // fast_add
kMuWriter | kMuReader, // slow_need_zero
~static_cast<intptr_t>(0), // slow_inc_need_zero
};
static const Mutex::MuHow kShared = &kSharedS; // shared lock
static const Mutex::MuHow kExclusive = &kExclusiveS; // exclusive lock
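// Illustrative note: the uncontended fast path for a mode `how` is a single
// compare-and-swap: if (v & how->fast_need_zero) == 0, replace v with
// (v | how->fast_or) + how->fast_add. For kShared that sets kMuReader and adds
// kMuOne to the reader count; for kExclusive it simply sets kMuWriter.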
#ifdef NDEBUG
static constexpr bool kDebugMode = false;
#else
static constexpr bool kDebugMode = true;
#endif
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
static unsigned TsanFlags(Mutex::MuHow how) {
return how == kShared ? __tsan_mutex_read_lock : 0;
}
#endif
static bool DebugOnlyIsExiting() {
return false;
}
Mutex::~Mutex() {
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
}
if (kDebugMode) {
this->ForgetDeadlockInfo();
}
ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
}
void Mutex::EnableDebugLog(const char *name) {
SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
e->log = true;
UnrefSynchEvent(e);
}
void EnableMutexInvariantDebugging(bool enabled) {
synch_check_invariants.store(enabled, std::memory_order_release);
}
void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
void *arg) {
if (synch_check_invariants.load(std::memory_order_acquire) &&
invariant != nullptr) {
SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
e->invariant = invariant;
e->arg = arg;
UnrefSynchEvent(e);
}
}
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
synch_deadlock_detection.store(mode, std::memory_order_release);
}
// Return true iff threads x and y are part of the same equivalence
// class of waiters. An equivalence class is defined as the set of
// waiters with the same condition, type of lock, and thread priority.
//
// Requires that x and y be waiting on the same Mutex queue.
static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
return x->waitp->how == y->waitp->how && x->priority == y->priority &&
Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
}
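// Illustrative note: for example, two exclusive waiters with no Condition and
// equal priority are "equivalent" (Condition::GuaranteedEqual(nullptr, nullptr)
// is true), which is what lets the skip chains below step over whole runs of
// such waiters at once.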
// Given the contents of a mutex word containing a PerThreadSynch pointer,
// return the pointer.
static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
}
// The next several routines maintain the per-thread next and skip fields
// used in the Mutex waiter queue.
// The queue is a circular singly-linked list, of which the "head" is the
// last element, and head->next is the first element.
// The skip field has the invariant:
// For thread x, x->skip is one of:
// - invalid (iff x is not in a Mutex wait queue),
// - null, or
// - a pointer to a distinct thread waiting later in the same Mutex queue
// such that all threads in [x, x->skip] have the same condition, priority
// and lock type (MuEquivalentWaiter() is true for all pairs in [x,
// x->skip]).
// In addition, if x->skip is valid, (x->may_skip || x->skip == null)
//
// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
// first runnable thread y from the front of a Mutex queue to adjust the skip
// field of another thread x because if x->skip==y, x->skip must (have) become
// invalid before y is removed. The function TryRemove can remove a specified
// thread from an arbitrary position in the queue whether runnable or not, so
// it fixes up skip fields that would otherwise be left dangling.
// The statement
// if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
// maintains the invariant, provided x is not the last waiter in a Mutex queue.
// The statement
// if (x->skip != null) { x->skip = x->skip->skip; }
// maintains the invariant.
// Returns the last thread y in a mutex waiter queue such that all threads in
// [x, y] inclusive share the same condition. Sets skip fields of some threads
// in that range to optimize future evaluation of Skip() on x values in
// the range. Requires thread x is in a mutex waiter queue.
// The locking is unusual. Skip() is called under these conditions:
// - spinlock is held in call from Enqueue(), with maybe_unlocking == false
// - Mutex is held in call from UnlockSlow() by last unlocker, with
// maybe_unlocking == true
// - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
// UnlockSlow()) and TryRemove()
// These cases are mutually exclusive, so Skip() never runs concurrently
// with itself on the same Mutex. The skip chain is used in these other places
// that cannot occur concurrently:
// - FixSkip() (from TryRemove()) (spinlock and Mutex are held)
// - Dequeue() (with spinlock and Mutex held)
// - UnlockSlow() (with spinlock and Mutex held)
// A more complex case is Enqueue()
// - Enqueue() (with spinlock held and maybe_unlocking == false)
// This is the first case in which Skip is called, above.
// - Enqueue() (without spinlock held; but queue is empty and being freshly
// formed)
// - Enqueue() (with spinlock held and maybe_unlocking == true)
// The first case has mutual exclusion, and the second has isolation through
// working on an otherwise unreachable data structure.
// In the last case, Enqueue() is required to change no skip/next pointers
// except those in the added node and the former "head" node. This implies
// that the new node is added after head, and so must be the new head or the
// new front of the queue.
static PerThreadSynch *Skip(PerThreadSynch *x) {
PerThreadSynch *x0 = nullptr;
PerThreadSynch *x1 = x;
PerThreadSynch *x2 = x->skip;
if (x2 != nullptr) {
// Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
// such that x1 == x0->skip && x2 == x1->skip
while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
x0->skip = x2; // short-circuit skip from x0 to x2
}
x->skip = x1; // short-circuit skip from x to result
}
return x1;
}
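// Worked example (added for illustration): for queued waiters a->b->c->d with
// a->skip == b, b->skip == c, c->skip == d and d->skip == nullptr, Skip(a)
// returns d and leaves both a->skip and b->skip pointing directly at d, so a
// later Skip() over the same equivalence class finishes in one step.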
// "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
// The latter is going to be removed out of order, because of a timeout.
// Check whether "ancestor" has a skip field pointing to "to_be_removed",
// and fix it if it does.
static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
if (to_be_removed->skip != nullptr) {
ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
} else if (ancestor->next != to_be_removed) { // they are not adjacent
ancestor->skip = ancestor->next; // can skip one past ancestor
} else {
ancestor->skip = nullptr; // can't skip at all
}
}
}
static void CondVarEnqueue(SynchWaitParams *waitp);
// Enqueue thread "waitp->thread" on a waiter queue.
// Called with mutex spinlock held if head != nullptr
// If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
// idempotent; it alters no state associated with the existing (empty)
// queue.
//
// If waitp->cv_word == nullptr, queue the thread at either the front or
// the end (according to its priority) of the circular mutex waiter queue whose
// head is "head", and return the new head. mu is the previous mutex state,
// which contains the reader count (perhaps adjusted for the operation in
// progress) if the list was empty and a read lock held, and the holder hint if
// the list was empty and a write lock held. (flags & kMuIsCond) indicates
// whether this thread was transferred from a CondVar or is waiting for a
// non-trivial condition. In this case, Enqueue() never returns nullptr
//
// If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
// returned. This mechanism is used by CondVar to queue a thread on the
// condition variable queue instead of the mutex queue in implementing Wait().
// In this case, Enqueue() can return nullptr (if head==nullptr).
static PerThreadSynch *Enqueue(PerThreadSynch *head,
SynchWaitParams *waitp, intptr_t mu, int flags) {
// If we have been given a cv_word, call CondVarEnqueue() and return
// the previous head of the Mutex waiter queue.
if (waitp->cv_word != nullptr) {
CondVarEnqueue(waitp);
return head;
}
PerThreadSynch *s = waitp->thread;
ABSL_RAW_CHECK(
s->waitp == nullptr || // normal case
s->waitp == waitp || // Fer()---transfer from condition variable
s->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
s->waitp = waitp;
s->skip = nullptr; // maintain skip invariant (see above)
s->may_skip = true; // always true on entering queue
s->wake = false; // not being woken
s->cond_waiter = ((flags & kMuIsCond) != 0);
if (head == nullptr) { // s is the only waiter
s->next = s; // it's the only entry in the cycle
s->readers = mu; // reader count is from mu word
s->maybe_unlocking = false; // no one is searching an empty list
head = s; // s is new head
} else {
PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
int64_t now_cycles = base_internal::CycleClock::Now();
if (s->next_priority_read_cycles < now_cycles) {
// Every so often, update our idea of the thread's priority.
// pthread_getschedparam() is 5% of the block/wakeup time;
// base_internal::CycleClock::Now() is 0.5%.
int policy;
struct sched_param param;
const int err = pthread_getschedparam(pthread_self(), &policy, &param);
if (err != 0) {
ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
} else {
s->priority = param.sched_priority;
s->next_priority_read_cycles =
now_cycles +
static_cast<int64_t>(base_internal::CycleClock::Frequency());
}
}
if (s->priority > head->priority) { // s's priority is above head's
// try to put s in priority-fifo order, or failing that at the front.
if (!head->maybe_unlocking) {
// No unlocker can be scanning the queue, so we can insert into the
// middle of the queue.
//
// Within a skip chain, all waiters have the same priority, so we can
// skip forward through the chains until we find one with a lower
// priority than the waiter to be enqueued.
PerThreadSynch *advance_to = head; // next value of enqueue_after
do {
enqueue_after = advance_to;
// (side-effect: optimizes skip chain)
advance_to = Skip(enqueue_after->next);
} while (s->priority <= advance_to->priority);
// termination guaranteed because s->priority > head->priority
// and head is the end of a skip chain
} else if (waitp->how == kExclusive &&
Condition::GuaranteedEqual(waitp->cond, nullptr)) {
// An unlocker could be scanning the queue, but we know it will recheck
// the queue front for writers that have no condition, which is what s
// is, so an insert at front is safe.
enqueue_after = head; // add after head, at front
}
}
#endif
if (enqueue_after != nullptr) {
s->next = enqueue_after->next;
enqueue_after->next = s;
// enqueue_after can be: head, Skip(...), or cur.
// The first two imply enqueue_after->skip == nullptr, and
// the last is used only if MuEquivalentWaiter(s, cur).
// We require this because clearing enqueue_after->skip
// is impossible; enqueue_after's predecessors might also
// incorrectly skip over s if we were to allow other
// insertion points.
ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
MuEquivalentWaiter(enqueue_after, s),
"Mutex Enqueue failure");
if (enqueue_after != head && enqueue_after->may_skip &&
MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
// enqueue_after can skip to its new successor, s
enqueue_after->skip = enqueue_after->next;
}
if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
s->skip = s->next; // s may skip to its successor
}
} else { // enqueue not done any other way, so
// we're inserting s at the back
// s will become new head; copy data from head into it
s->next = head->next; // add s after head
head->next = s;
s->readers = head->readers; // reader count is from previous head
s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
if (head->may_skip && MuEquivalentWaiter(head, s)) {
// head now has successor; may skip
head->skip = s;
}
head = s; // s is new head
}
}
s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
return head;
}
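// Illustrative note: when ABSL_HAVE_PTHREAD_GETSCHEDPARAM is defined, the code
// above keeps the circular waiter queue in priority-FIFO order. For example,
// if the queue holds only priority-0 waiters and a priority-2 waiter arrives
// while no unlocker is scanning the queue, the skip-chain scan inserts it at
// the front; a waiter whose priority does not exceed head's is appended at the
// back and becomes the new head.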