comparison CSP2/CSP2_env/env-d9b9114564458d9d-741b3de822f2aaca6c6caa4325c4afce/include/kj/mutex.h @ 69:33d812a61356

planemo upload commit 2e9511a184a1ca667c7be0c6321a36dc4e3d116d
author jpayne
date Tue, 18 Mar 2025 17:55:14 -0400
// Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#pragma once

#include "debug.h"
#include "memory.h"
#include <inttypes.h>
#include "time.h"
#include "source-location.h"
#include "one-of.h"

KJ_BEGIN_HEADER

#if __linux__ && !defined(KJ_USE_FUTEX)
#define KJ_USE_FUTEX 1
#endif

#if !KJ_USE_FUTEX && !_WIN32 && !__CYGWIN__
// We fall back to pthreads when we don't have a better platform-specific primitive. pthreads
// mutexes are bloated, though, so we like to avoid them. Hence on Linux we use futex(), and on
// Windows we use SRW locks and friends. On Cygwin we prefer the Win32 primitives both because they
// are more efficient and because I ran into problems with Cygwin's implementation of RW locks
// seeming to allow multiple threads to lock the same mutex (but I didn't investigate very
// closely).
//
// TODO(someday): Write efficient low-level locking primitives for other platforms.
#include <pthread.h>
#endif

// There are 3 macros controlling lock tracking:
// KJ_TRACK_LOCK_BLOCKING will set up async signal safe TLS variables that can be used to identify
// the KJ primitive blocking the current thread.
// KJ_SAVE_ACQUIRED_LOCK_INFO will allow introspection of a Mutex to get information about what is
// currently holding the lock.
// KJ_TRACK_LOCK_ACQUISITION is automatically enabled by either one of them.
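//
// For illustration only (a build-configuration sketch, not code from this header): since these
// are ordinary preprocessor macros, a build that wants the extra diagnostics might define them on
// the compiler command line, e.g. something like:
//
//     c++ -DKJ_TRACK_LOCK_BLOCKING=1 -DKJ_SAVE_ACQUIRED_LOCK_INFO=1 ...
//
// in which case KJ_TRACK_LOCK_ACQUISITION is enabled automatically by the checks below.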

#if KJ_TRACK_LOCK_BLOCKING
// Lock tracking is required to keep track of what blocked.
#define KJ_TRACK_LOCK_ACQUISITION 1
#endif

#if KJ_SAVE_ACQUIRED_LOCK_INFO
#define KJ_TRACK_LOCK_ACQUISITION 1
#include <unistd.h>
#endif

namespace kj {
#if KJ_TRACK_LOCK_ACQUISITION
#if !KJ_USE_FUTEX
#error Lock tracking is only currently supported for futex-based mutexes.
#endif

#if !KJ_COMPILER_SUPPORTS_SOURCE_LOCATION
#error C++20 or newer is required (or the use of clang/gcc).
#endif

using LockSourceLocation = SourceLocation;
using LockSourceLocationArg = const SourceLocation&;
// On x86-64 the codegen is optimal if the argument has type const& for the location. However,
// since this conflicts with the optimal call signature for NoopSourceLocation,
// LockSourceLocationArg is used to conditionally select the right type without polluting the
// usage sites themselves. Interestingly, this makes no difference on ARM.
// https://godbolt.org/z/q6G8ee5a3
#else
using LockSourceLocation = NoopSourceLocation;
using LockSourceLocationArg = NoopSourceLocation;
#endif


class Exception;

// =======================================================================================
// Private details -- public interfaces follow below.

namespace _ { // private

#if KJ_SAVE_ACQUIRED_LOCK_INFO
class HoldingExclusively {
  // The lock is being held in exclusive mode.
public:
  constexpr HoldingExclusively(pid_t tid, const SourceLocation& location)
      : heldBy(tid), acquiredAt(location) {}

  pid_t threadHoldingLock() const { return heldBy; }
  const SourceLocation& lockAcquiredAt() const { return acquiredAt; }

private:
  pid_t heldBy;
  SourceLocation acquiredAt;
};

class HoldingShared {
  // The lock is being held in shared mode currently. Which threads are holding this lock open
  // is unknown.
public:
  constexpr HoldingShared(const SourceLocation& location) : acquiredAt(location) {}

  const SourceLocation& lockAcquiredAt() const { return acquiredAt; }

private:
  SourceLocation acquiredAt;
};
#endif

class Mutex {
  // Internal implementation details. See `MutexGuarded<T>`.

  struct Waiter;

public:
  Mutex();
  ~Mutex();
  KJ_DISALLOW_COPY_AND_MOVE(Mutex);

  enum Exclusivity {
    EXCLUSIVE,
    SHARED
  };

  bool lock(Exclusivity exclusivity, Maybe<Duration> timeout, LockSourceLocationArg location);
  void unlock(Exclusivity exclusivity, Waiter* waiterToSkip = nullptr);

  void assertLockedByCaller(Exclusivity exclusivity) const;
  // In debug mode, assert that the mutex is locked by the calling thread, or if that is
  // non-trivial, assert that the mutex is locked (which should be good enough to catch problems
  // in unit tests). In non-debug builds, do nothing.

  class Predicate {
  public:
    virtual bool check() = 0;
  };

  void wait(Predicate& predicate, Maybe<Duration> timeout, LockSourceLocationArg location);
  // If predicate.check() returns false, unlock the mutex until predicate.check() returns true or
  // until the timeout (if any) expires. The mutex is always re-locked when this returns,
  // regardless of whether the timeout expired, including if it throws.
  //
  // Requires that the mutex is already exclusively locked before calling.

  void induceSpuriousWakeupForTest();
  // Utility method for mutex-test.c++ which causes a spurious thread wakeup on all threads that
  // are waiting for a wait() condition. Assuming correct implementation, all those threads
  // should immediately go back to sleep.

#if KJ_USE_FUTEX
  uint numReadersWaitingForTest() const;
  // The number of reader locks that are currently blocked on this lock (must be called while
  // holding the writer lock). This is really only a utility method for mutex-test.c++ so it can
  // validate certain invariants.
#endif

#if KJ_SAVE_ACQUIRED_LOCK_INFO
  using AcquiredMetadata = kj::OneOf<HoldingExclusively, HoldingShared>;
  KJ_DISABLE_TSAN AcquiredMetadata lockedInfo() const;
  // Returns metadata about this lock when it's held. This method is async signal safe. It must
  // also be called in a state where it's guaranteed that the lock state won't be released by
  // another thread. In other words, this has to be called from the signal handler within the
  // thread that's holding the lock.
#endif

private:
#if KJ_USE_FUTEX
  uint futex;
  // bit 31 (msb) = set if exclusive lock held
  // bit 30 = set if threads are waiting for exclusive lock
  // bits 0-29 = count of readers; If an exclusive lock is held, this is the count of threads
  // waiting for a read lock, otherwise it is the count of threads that currently hold a read
  // lock.

#ifdef KJ_CONTENTION_WARNING_THRESHOLD
  bool printContendedReader = false;
#endif

  static constexpr uint EXCLUSIVE_HELD = 1u << 31;
  static constexpr uint EXCLUSIVE_REQUESTED = 1u << 30;
  static constexpr uint SHARED_COUNT_MASK = EXCLUSIVE_REQUESTED - 1;
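  // As a worked example of the layout described above (purely illustrative): a futex value of
  // (EXCLUSIVE_HELD | 3) means the exclusive lock is held and three threads are blocked waiting
  // for read locks, while a plain value of 3 (EXCLUSIVE_HELD clear) means three threads currently
  // hold read locks.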

#elif _WIN32 || __CYGWIN__
  uintptr_t srwLock; // Actually an SRWLOCK, but don't want to #include <windows.h> in header.

#else
  mutable pthread_rwlock_t mutex;
#endif

#if KJ_SAVE_ACQUIRED_LOCK_INFO
  pid_t lockedExclusivelyByThread = 0;
  SourceLocation lockAcquiredLocation;

  KJ_DISABLE_TSAN void acquiredExclusive(pid_t tid, const SourceLocation& location) noexcept {
    lockAcquiredLocation = location;
    __atomic_store_n(&lockedExclusivelyByThread, tid, __ATOMIC_RELAXED);
  }

  KJ_DISABLE_TSAN void acquiredShared(const SourceLocation& location) noexcept {
    lockAcquiredLocation = location;
  }

  KJ_DISABLE_TSAN SourceLocation releasingExclusive() noexcept {
    auto tmp = lockAcquiredLocation;
    lockAcquiredLocation = SourceLocation{};
    lockedExclusivelyByThread = 0;
    return tmp;
  }
#else
  static constexpr void acquiredExclusive(uint, LockSourceLocationArg) {}
  static constexpr void acquiredShared(LockSourceLocationArg) {}
  static constexpr NoopSourceLocation releasingExclusive() { return NoopSourceLocation{}; }
#endif
  struct Waiter {
    kj::Maybe<Waiter&> next;
    kj::Maybe<Waiter&>* prev;
    Predicate& predicate;
    Maybe<Own<Exception>> exception;
#if KJ_USE_FUTEX
    uint futex;
    bool hasTimeout;
#elif _WIN32 || __CYGWIN__
    uintptr_t condvar;
    // Actually CONDITION_VARIABLE, but don't want to #include <windows.h> in header.
#else
    pthread_cond_t condvar;

    pthread_mutex_t stupidMutex;
    // pthread condvars are only compatible with basic pthread mutexes, not rwlocks, for no
    // particularly good reason. To work around this, we need an extra mutex per condvar.
#endif
  };

  kj::Maybe<Waiter&> waitersHead = nullptr;
  kj::Maybe<Waiter&>* waitersTail = &waitersHead;
  // linked list of waiters; can only modify under lock

  inline void addWaiter(Waiter& waiter);
  inline void removeWaiter(Waiter& waiter);
  bool checkPredicate(Waiter& waiter);
#if _WIN32 || __CYGWIN__
  void wakeReadyWaiter(Waiter* waiterToSkip);
#endif
};

class Once {
  // Internal implementation details. See `Lazy<T>`.

public:
#if KJ_USE_FUTEX
  inline Once(bool startInitialized = false)
      : futex(startInitialized ? INITIALIZED : UNINITIALIZED) {}
#else
  Once(bool startInitialized = false);
  ~Once();
#endif
  KJ_DISALLOW_COPY_AND_MOVE(Once);

  class Initializer {
  public:
    virtual void run() = 0;
  };

  void runOnce(Initializer& init, LockSourceLocationArg location);

#if _WIN32 || __CYGWIN__ // TODO(perf): Can we make this inline on win32 somehow?
  bool isInitialized() noexcept;

#else
  inline bool isInitialized() noexcept {
    // Fast path check to see if runOnce() would simply return immediately.
#if KJ_USE_FUTEX
    return __atomic_load_n(&futex, __ATOMIC_ACQUIRE) == INITIALIZED;
#else
    return __atomic_load_n(&state, __ATOMIC_ACQUIRE) == INITIALIZED;
#endif
  }
#endif

  void reset();
  // Resets the state from initialized to uninitialized. It is an error to call this when
  // not already initialized, or when runOnce() or isInitialized() might be called concurrently in
  // another thread.

private:
#if KJ_USE_FUTEX
  uint futex;

  enum State {
    UNINITIALIZED,
    INITIALIZING,
    INITIALIZING_WITH_WAITERS,
    INITIALIZED
  };

#elif _WIN32 || __CYGWIN__
  uintptr_t initOnce; // Actually an INIT_ONCE, but don't want to #include <windows.h> in header.

#else
  enum State {
    UNINITIALIZED,
    INITIALIZED
  };
  State state;
  pthread_mutex_t mutex;
#endif
};

} // namespace _ (private)

// =======================================================================================
// Public interface

template <typename T>
class Locked {
  // Return type for `MutexGuarded<T>::lock()`. `Locked<T>` provides access to the bounded object
  // and unlocks the mutex when it goes out of scope.

public:
  KJ_DISALLOW_COPY(Locked);
  inline Locked(): mutex(nullptr), ptr(nullptr) {}
  inline Locked(Locked&& other): mutex(other.mutex), ptr(other.ptr) {
    other.mutex = nullptr;
    other.ptr = nullptr;
  }
  inline ~Locked() {
    if (mutex != nullptr) mutex->unlock(isConst<T>() ? _::Mutex::SHARED : _::Mutex::EXCLUSIVE);
  }

  inline Locked& operator=(Locked&& other) {
    if (mutex != nullptr) mutex->unlock(isConst<T>() ? _::Mutex::SHARED : _::Mutex::EXCLUSIVE);
    mutex = other.mutex;
    ptr = other.ptr;
    other.mutex = nullptr;
    other.ptr = nullptr;
    return *this;
  }

  inline void release() {
    if (mutex != nullptr) mutex->unlock(isConst<T>() ? _::Mutex::SHARED : _::Mutex::EXCLUSIVE);
    mutex = nullptr;
    ptr = nullptr;
  }

  inline T* operator->() { return ptr; }
  inline const T* operator->() const { return ptr; }
  inline T& operator*() { return *ptr; }
  inline const T& operator*() const { return *ptr; }
  inline T* get() { return ptr; }
  inline const T* get() const { return ptr; }
  inline operator T*() { return ptr; }
  inline operator const T*() const { return ptr; }

  template <typename Cond>
  void wait(Cond&& condition, Maybe<Duration> timeout = nullptr,
            LockSourceLocationArg location = {}) {
    // Unlocks the lock until `condition(state)` evaluates true (where `state` is type `const T&`
    // referencing the object protected by the lock).

    // We can't wait on a shared lock because the internal bookkeeping needed for a wait requires
    // the protection of an exclusive lock.
    static_assert(!isConst<T>(), "cannot wait() on shared lock");

    struct PredicateImpl final: public _::Mutex::Predicate {
      bool check() override {
        return condition(value);
      }

      Cond&& condition;
      const T& value;

      PredicateImpl(Cond&& condition, const T& value)
          : condition(kj::fwd<Cond>(condition)), value(value) {}
    };

    PredicateImpl impl(kj::fwd<Cond>(condition), *ptr);
    mutex->wait(impl, timeout, location);
  }

private:
  _::Mutex* mutex;
  T* ptr;

  inline Locked(_::Mutex& mutex, T& value): mutex(&mutex), ptr(&value) {}

  template <typename U>
  friend class MutexGuarded;
  template <typename U>
  friend class ExternalMutexGuarded;

#if KJ_MUTEX_TEST
public:
#endif
  void induceSpuriousWakeupForTest() { mutex->induceSpuriousWakeupForTest(); }
  // Utility method for mutex-test.c++ which causes a spurious thread wakeup on all threads that
  // are waiting for a when() condition. Assuming correct implementation, all those threads should
  // immediately go back to sleep.
};
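
// Example of Locked<T>::wait() (illustrative sketch; the names below are hypothetical, not part
// of this header): a thread holding a `Locked<T>` from `MutexGuarded<T>::lockExclusive()` can
// block until the guarded state satisfies a condition, then keep using the still-held lock:
//
//     kj::MutexGuarded<int> counter(0);
//
//     void produce() {
//       *counter.lockExclusive() = 42;   // temporary Locked<int> unlocks at end of statement
//     }
//
//     void waitForNonZero() {
//       auto locked = counter.lockExclusive();
//       // Temporarily releases the lock until the condition holds, then re-locks before returning.
//       locked.wait([](const int& value) { return value > 0; });
//       *locked = 0;                     // still exclusively locked here
//     }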

template <typename T>
class MutexGuarded {
  // An object of type T, bounded by a mutex. In order to access the object, you must lock it.
  //
  // Write locks are not "recursive" -- trying to lock again in a thread that already holds a lock
  // will deadlock. Recursive write locks are usually a sign of bad design.
  //
  // Unfortunately, **READ LOCKS ARE NOT RECURSIVE** either. Common sense says they should be.
  // But on many operating systems (BSD, OSX), recursively read-locking a pthread_rwlock is
  // actually unsafe. The problem is that writers are "prioritized" over readers, so a read lock
  // request will block if any write lock requests are outstanding. So, if thread A takes a read
  // lock, thread B requests a write lock (and starts waiting), and then thread A tries to take
  // another read lock recursively, the result is deadlock.

public:
  template <typename... Params>
  explicit MutexGuarded(Params&&... params);
  // Initialize the mutex-bounded object by passing the given parameters to its constructor.

  Locked<T> lockExclusive(LockSourceLocationArg location = {}) const;
  // Exclusively locks the object and returns it. The returned `Locked<T>` can be passed by
  // move, similar to `Own<T>`.
  //
  // This method is declared `const` in accordance with KJ style rules which say that constness
  // should be used to indicate thread-safety. It is safe to share a const pointer between threads,
  // but it is not safe to share a mutable pointer. Since the whole point of MutexGuarded is to
  // be shared between threads, its methods should be const, even though locking it produces a
  // non-const pointer to the contained object.

  Locked<const T> lockShared(LockSourceLocationArg location = {}) const;
  // Lock the value for shared access. Multiple shared locks can be taken concurrently, but cannot
  // be held at the same time as a non-shared lock.

  Maybe<Locked<T>> lockExclusiveWithTimeout(Duration timeout,
      LockSourceLocationArg location = {}) const;
  // Attempts to exclusively lock the object. If the timeout elapses before the lock is acquired,
  // this returns null.

  Maybe<Locked<const T>> lockSharedWithTimeout(Duration timeout,
      LockSourceLocationArg location = {}) const;
  // Attempts to lock the value for shared access. If the timeout elapses before the lock is
  // acquired, this returns null.

  inline const T& getWithoutLock() const { return value; }
  inline T& getWithoutLock() { return value; }
  // Escape hatch for cases where some external factor guarantees that it's safe to get the
  // value. You should treat these like const_cast -- be highly suspicious of any use.

  inline const T& getAlreadyLockedShared() const;
  inline T& getAlreadyLockedShared();
  inline T& getAlreadyLockedExclusive() const;
  // Like `getWithoutLock()`, but asserts that the lock is already held by the calling thread.

  template <typename Cond, typename Func>
  auto when(Cond&& condition, Func&& callback, Maybe<Duration> timeout = nullptr,
            LockSourceLocationArg location = {}) const
      -> decltype(callback(instance<T&>())) {
    // Waits until condition(state) returns true, then calls callback(state) under lock.
    //
    // `condition`, when called, receives as its parameter a const reference to the state, which is
    // locked (either shared or exclusive). `callback` receives a mutable reference, which is
    // exclusively locked.
    //
    // `condition()` may be called multiple times, from multiple threads, while waiting for the
    // condition to become true. It may even return true once, but then be called more times.
    // It is guaranteed, though, that at the time `callback()` is finally called, `condition()`
    // would currently return true (assuming it is a pure function of the guarded data).
    //
    // If `timeout` is specified, then after the given amount of time, the callback will be called
    // regardless of whether the condition is true. In this case, when `callback()` is called,
    // `condition()` may in fact evaluate false, but *only* if the timeout was reached.
    //
    // TODO(cleanup): lock->wait() is a better interface. Can we deprecate this one?

    auto lock = lockExclusive();
    lock.wait(kj::fwd<Cond>(condition), timeout, location);
    return callback(value);
  }

private:
  mutable _::Mutex mutex;
  mutable T value;
};
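
// Example usage of MutexGuarded (illustrative sketch; the guarded type, globals, and function
// names are hypothetical, not part of this header):
//
//     kj::MutexGuarded<kj::Maybe<int>> slot;
//
//     void produce(int value) {
//       auto lock = slot.lockExclusive();    // Locked<Maybe<int>>
//       *lock = value;
//     }
//
//     bool hasValue() {
//       auto lock = slot.lockShared();       // Locked<const Maybe<int>>; shared locks may overlap
//       return *lock != nullptr;
//     }
//
//     int consume() {
//       // Block until a value is present, then take it while exclusively locked.
//       return slot.when([](const kj::Maybe<int>& m) { return m != nullptr; },
//                        [](kj::Maybe<int>& m) {
//         int result = KJ_ASSERT_NONNULL(m);
//         m = nullptr;
//         return result;
//       });
//     }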

template <typename T>
class MutexGuarded<const T> {
  // MutexGuarded cannot guard a const type. This would be pointless anyway, and would complicate
  // the implementation of Locked<T>, which uses constness to decide what kind of lock it holds.
  static_assert(sizeof(T) < 0, "MutexGuarded's type cannot be const.");
};

template <typename T>
class ExternalMutexGuarded {
  // Holds a value that can only be manipulated while some other mutex is locked.
  //
  // The ExternalMutexGuarded<T> lives *outside* the scope of any lock on the mutex, but ensures
  // that the value it holds can only be accessed under lock by forcing the caller to present a
  // lock before accessing the value.
  //
  // Additionally, ExternalMutexGuarded<T>'s destructor will take an exclusive lock on the mutex
  // while destroying the held value, unless the value has been release()ed beforehand.
  //
  // The type T must have the following properties (which probably all movable types satisfy):
  // - T is movable.
  // - Immediately after any of the following has happened, T's destructor is effectively a no-op
  //   (hence certainly not requiring locks):
  //   - The value has been default-constructed.
  //   - The value has been initialized by-move from a default-constructed T.
  //   - The value has been moved away.
  // - If ExternalMutexGuarded<T> is ever moved, then T must have a move constructor and move
  //   assignment operator that do not follow any pointers, therefore do not need to take a lock.
  //
  // The source location is stored as a KJ_NO_UNIQUE_ADDRESS member so that it adds no overhead
  // when lock tracking is compiled out (on compilers that honor the annotation); see the note on
  // the `location` member below.
public:
  ExternalMutexGuarded(LockSourceLocationArg location = {})
      : location(location) {}

  template <typename U, typename... Params>
  ExternalMutexGuarded(Locked<U> lock, Params&&... params, LockSourceLocationArg location = {})
      : mutex(lock.mutex),
        value(kj::fwd<Params>(params)...),
        location(location) {}
  // Construct the value in-place. This constructor requires passing ownership of the lock into
  // the constructor. Normally this should be a lock that you take on the line calling the
  // constructor, like:
  //
  //     ExternalMutexGuarded<T> foo(someMutexGuarded.lockExclusive());
  //
  // The reason this constructor does not accept an lvalue reference to an existing lock is because
  // this would be deadlock-prone: If an exception were thrown immediately after the constructor
  // completed, then the destructor would deadlock, because the lock would still be held. An
  // ExternalMutexGuarded must live outside the scope of any locks to avoid such a deadlock.

  ~ExternalMutexGuarded() noexcept(false) {
    if (mutex != nullptr) {
      mutex->lock(_::Mutex::EXCLUSIVE, nullptr, location);
      KJ_DEFER(mutex->unlock(_::Mutex::EXCLUSIVE));
      value = T();
    }
  }

  ExternalMutexGuarded(ExternalMutexGuarded&& other)
      : mutex(other.mutex), value(kj::mv(other.value)), location(other.location) {
    other.mutex = nullptr;
  }
  ExternalMutexGuarded& operator=(ExternalMutexGuarded&& other) {
    mutex = other.mutex;
    value = kj::mv(other.value);
    location = other.location;
    other.mutex = nullptr;
    return *this;
  }

  template <typename U>
  void set(Locked<U>& lock, T&& newValue) {
    KJ_IREQUIRE(mutex == nullptr);
    mutex = lock.mutex;
    value = kj::mv(newValue);
  }

  template <typename U>
  T& get(Locked<U>& lock) {
    KJ_IREQUIRE(lock.mutex == mutex);
    return value;
  }

  template <typename U>
  const T& get(Locked<const U>& lock) const {
    KJ_IREQUIRE(lock.mutex == mutex);
    return value;
  }

  template <typename U>
  T release(Locked<U>& lock) {
    // Release (move away) the value. This allows the destructor to skip locking the mutex.
    KJ_IREQUIRE(lock.mutex == mutex);
    T result = kj::mv(value);
    mutex = nullptr;
    return result;
  }

private:
  _::Mutex* mutex = nullptr;
  T value;
  KJ_NO_UNIQUE_ADDRESS LockSourceLocation location;
  // When built against C++20 (or clang >= 9.0), the overhead of this is elided. Otherwise this
  // struct will be 1 byte larger than it would otherwise be.
};
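
// Example usage of ExternalMutexGuarded (illustrative sketch; `State`, `Listener`, and the
// globals below are hypothetical): a value that may only be touched, and must be destroyed,
// while some MutexGuarded is locked.
//
//     kj::MutexGuarded<State> state;
//     kj::ExternalMutexGuarded<kj::Own<Listener>> listener;
//
//     void attach(kj::Own<Listener> l) {
//       auto lock = state.lockExclusive();
//       listener.set(lock, kj::mv(l));       // may only be set/read while `state` is locked
//     }
//
//     void detach() {
//       auto lock = state.lockExclusive();
//       auto dropped = listener.release(lock);  // moved out under lock; destroyed after unlock
//     }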

template <typename T>
class Lazy {
  // A lazily-initialized value.

public:
  template <typename Func>
  T& get(Func&& init, LockSourceLocationArg location = {});
  template <typename Func>
  const T& get(Func&& init, LockSourceLocationArg location = {}) const;
  // The first thread to call get() will invoke the given init function to construct the value.
  // Other threads will block until construction completes, then return the same value.
  //
  // `init` is a functor (typically a lambda) which takes `SpaceFor<T>&` as its parameter and returns
  // `Own<T>`. If `init` throws an exception, the exception is propagated out of that thread's
  // call to `get()`, and subsequent calls behave as if `get()` hadn't been called at all yet --
  // in other words, subsequent calls retry initialization until it succeeds.

private:
  mutable _::Once once;
  mutable SpaceFor<T> space;
  mutable Own<T> value;

  template <typename Func>
  class InitImpl;
};
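
// Example usage of Lazy (illustrative sketch; `ExpensiveThing` and `lazyThing` are hypothetical):
//
//     kj::Lazy<ExpensiveThing> lazyThing;
//
//     ExpensiveThing& getThing() {
//       // The first caller constructs the value; concurrent callers block until it's ready.
//       return lazyThing.get([](kj::SpaceFor<ExpensiveThing>& space) {
//         return space.construct(/* constructor args */);
//       });
//     }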

// =======================================================================================
// Inline implementation details

template <typename T>
template <typename... Params>
inline MutexGuarded<T>::MutexGuarded(Params&&... params)
    : value(kj::fwd<Params>(params)...) {}

template <typename T>
inline Locked<T> MutexGuarded<T>::lockExclusive(LockSourceLocationArg location)
    const {
  mutex.lock(_::Mutex::EXCLUSIVE, nullptr, location);
  return Locked<T>(mutex, value);
}

template <typename T>
inline Locked<const T> MutexGuarded<T>::lockShared(LockSourceLocationArg location) const {
  mutex.lock(_::Mutex::SHARED, nullptr, location);
  return Locked<const T>(mutex, value);
}

template <typename T>
inline Maybe<Locked<T>> MutexGuarded<T>::lockExclusiveWithTimeout(Duration timeout,
    LockSourceLocationArg location) const {
  if (mutex.lock(_::Mutex::EXCLUSIVE, timeout, location)) {
    return Locked<T>(mutex, value);
  } else {
    return nullptr;
  }
}

template <typename T>
inline Maybe<Locked<const T>> MutexGuarded<T>::lockSharedWithTimeout(Duration timeout,
    LockSourceLocationArg location) const {
  if (mutex.lock(_::Mutex::SHARED, timeout, location)) {
    return Locked<const T>(mutex, value);
  } else {
    return nullptr;
  }
}

template <typename T>
inline const T& MutexGuarded<T>::getAlreadyLockedShared() const {
#ifdef KJ_DEBUG
  mutex.assertLockedByCaller(_::Mutex::SHARED);
#endif
  return value;
}
template <typename T>
inline T& MutexGuarded<T>::getAlreadyLockedShared() {
#ifdef KJ_DEBUG
  mutex.assertLockedByCaller(_::Mutex::SHARED);
#endif
  return value;
}
template <typename T>
inline T& MutexGuarded<T>::getAlreadyLockedExclusive() const {
#ifdef KJ_DEBUG
  mutex.assertLockedByCaller(_::Mutex::EXCLUSIVE);
#endif
  return const_cast<T&>(value);
}

template <typename T>
template <typename Func>
class Lazy<T>::InitImpl: public _::Once::Initializer {
public:
  inline InitImpl(const Lazy<T>& lazy, Func&& func): lazy(lazy), func(kj::fwd<Func>(func)) {}

  void run() override {
    lazy.value = func(lazy.space);
  }

private:
  const Lazy<T>& lazy;
  Func func;
};

template <typename T>
template <typename Func>
inline T& Lazy<T>::get(Func&& init, LockSourceLocationArg location) {
  if (!once.isInitialized()) {
    InitImpl<Func> initImpl(*this, kj::fwd<Func>(init));
    once.runOnce(initImpl, location);
  }
  return *value;
}

template <typename T>
template <typename Func>
inline const T& Lazy<T>::get(Func&& init, LockSourceLocationArg location) const {
  if (!once.isInitialized()) {
    InitImpl<Func> initImpl(*this, kj::fwd<Func>(init));
    once.runOnce(initImpl, location);
  }
  return *value;
}

#if KJ_TRACK_LOCK_BLOCKING
struct BlockedOnMutexAcquisition {
  const _::Mutex& mutex;
  // The mutex we are blocked on.

  const SourceLocation& origin;
  // Where did the blocking operation originate from.
};

struct BlockedOnCondVarWait {
  const _::Mutex& mutex;
  // The mutex the condition variable is using (may or may not be locked).

  const void* waiter;
  // Pointer to the waiter that's being waited on.

  const SourceLocation& origin;
  // Where did the blocking operation originate from.
};

struct BlockedOnOnceInit {
  const _::Once& once;

  const SourceLocation& origin;
  // Where did the blocking operation originate from.
};

using BlockedOnReason = OneOf<BlockedOnMutexAcquisition, BlockedOnCondVarWait, BlockedOnOnceInit>;

Maybe<const BlockedOnReason&> blockedReason() noexcept;
// Returns information about why the current thread is blocked synchronously on KJ lock
// primitives, or nullptr if the current thread is not currently blocked on such primitives. This
// is intended to be called from a signal handler to check whether the current thread is blocked.
// Outside of a signal handler there is little value to this function, since in those cases the
// thread is by definition not blocked. (This includes the callable used as part of a condition
// variable, since that runs after the lock is acquired and the current thread is no longer
// blocked.) The utility could be made useful for non-signal-handler use cases by being able to
// fetch the pointer to the TLS variable directly (i.e. const BlockedOnReason&*). However, there
// would have to be additional changes/complexity to make that work, since you'd need
// synchronization to ensure that the memory you'd try to reference is still valid. The likely
// solution would be to make these mutually exclusive options, where you can use either the fast
// async-safe option or a mutex-guarded TLS variable you can get a reference to that isn't
// async-safe. That being said, maybe someone can come up with a way to make something that works
// in both use cases, which would of course be preferable.
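//
// Example (illustrative sketch, assuming the KJ_IF_MAYBE and KJ_SWITCH_ONEOF/KJ_CASE_ONEOF macros
// from this KJ version): a watchdog signal handler might use this to report what the interrupted
// thread is stuck on.
//
//     void watchdogSignalHandler(int) {
//       KJ_IF_MAYBE(reason, kj::blockedReason()) {
//         KJ_SWITCH_ONEOF(*reason) {
//           KJ_CASE_ONEOF(blocked, kj::BlockedOnMutexAcquisition) {
//             // async-signal-safe reporting of blocked.origin goes here
//           }
//           KJ_CASE_ONEOF(blocked, kj::BlockedOnCondVarWait) {}
//           KJ_CASE_ONEOF(blocked, kj::BlockedOnOnceInit) {}
//         }
//       }
//     }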
#endif


} // namespace kj

KJ_END_HEADER