| author | Mitch Phillips <31459023+hctim@users.noreply.github.com> | 2021-02-02 14:32:28 -0800 |
|---|---|---|
| committer | Mitch Phillips <mitchp@google.com> | 2021-02-10 10:35:36 -0800 |
| commit | a6921b6e8c94531b3e49ce7df6d59f20a8fd20eb (patch) | |
| tree | 1eb42fa95b5f13314b2fac5ee9f02f4dd307f55b /gwp_asan/guarded_pool_allocator.cpp | |
| parent | 89447be18b6414c84693c39498e1dcd231dfcb9e (diff) | |
| download | gwp_asan-a6921b6e8c94531b3e49ce7df6d59f20a8fd20eb.tar.gz | |
[GWP-ASan] Add locking around unwinder for atfork protection.
Unwinders (like libc's backtrace()) can take their own locks (like the
libdl lock). We need to let the unwinder release those locks before
forking, so wrap a new lock around the unwinder for atfork protection.
Reviewed By: eugenis
Differential Revision: https://reviews.llvm.org/D95889
GitOrigin-RevId: 30973f6fe01cc0a9624147466f0c54b91a1b61d7
Change-Id: Ie6954087f30ca478843c7d69b6e3282ba5f7a2a4
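
To make the hazard concrete: if fork() runs while another thread is inside the unwinder and holding one of its internal locks (e.g. the libdl lock), the child inherits that lock in a permanently locked state with no owner to release it, and its next unwind deadlocks. Below is a minimal sketch of the "wrap a new lock around the unwinder" part, using std::mutex and glibc's backtrace() as stand-ins; lockedBacktrace is a hypothetical name, not GWP-ASan's API. The fork-side handlers appear in the sketch after the diff below.

```cpp
#include <execinfo.h> // backtrace() (glibc)
#include <mutex>

// Stand-in for the BacktraceMutex this patch adds: every call into the
// unwinder is funneled through this one lock.
static std::mutex BacktraceMutex;

// Taking the backtrace under the lock means that whoever holds
// BacktraceMutex knows no thread is mid-unwind, and hence that no
// unwinder-internal lock (such as the libdl lock) is currently held.
// An atfork prepare handler that acquires BacktraceMutex can therefore
// let fork() proceed safely, releasing it afterwards in parent and child.
int lockedBacktrace(void **Frames, int MaxFrames) {
  std::lock_guard<std::mutex> Guard(BacktraceMutex);
  return backtrace(Frames, MaxFrames);
}
```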
Diffstat (limited to 'gwp_asan/guarded_pool_allocator.cpp')
-rw-r--r-- | gwp_asan/guarded_pool_allocator.cpp | 16 |
1 file changed, 13 insertions(+), 3 deletions(-)
```diff
diff --git a/gwp_asan/guarded_pool_allocator.cpp b/gwp_asan/guarded_pool_allocator.cpp
index 86304d9..5e3455e 100644
--- a/gwp_asan/guarded_pool_allocator.cpp
+++ b/gwp_asan/guarded_pool_allocator.cpp
@@ -103,9 +103,15 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
   installAtFork();
 }
 
-void GuardedPoolAllocator::disable() { PoolMutex.lock(); }
+void GuardedPoolAllocator::disable() {
+  PoolMutex.lock();
+  BacktraceMutex.lock();
+}
 
-void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }
+void GuardedPoolAllocator::enable() {
+  PoolMutex.unlock();
+  BacktraceMutex.unlock();
+}
 
 void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                    void *Arg) {
@@ -232,7 +238,10 @@ void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
                             roundUpTo(Size, PageSize));
 
   Meta->RecordAllocation(UserPtr, Size);
-  Meta->AllocationTrace.RecordBacktrace(Backtrace);
+  {
+    ScopedLock UL(BacktraceMutex);
+    Meta->AllocationTrace.RecordBacktrace(Backtrace);
+  }
 
   return reinterpret_cast<void *>(UserPtr);
 }
@@ -281,6 +290,7 @@ void GuardedPoolAllocator::deallocate(void *Ptr) {
     // otherwise non-reentrant unwinders may deadlock.
     if (!getThreadLocals()->RecursiveGuard) {
       ScopedRecursiveGuard SRG;
+      ScopedLock UL(BacktraceMutex);
       Meta->DeallocationTrace.RecordBacktrace(Backtrace);
     }
   }
```
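
The disable()/enable() pair patched above is what installAtFork() (visible as context in the first hunk) registers as fork handlers. A sketch of that wiring under the same assumptions as above (std::mutex stand-ins for GWP-ASan's own mutex type; the handler names are hypothetical):

```cpp
#include <mutex>
#include <pthread.h>

// Stand-ins for the two locks in the patch: PoolMutex guards the guarded
// pool's state, BacktraceMutex (new here) guards the unwinder.
static std::mutex PoolMutex;
static std::mutex BacktraceMutex;

// Mirrors the patched disable(): quiesce the pool and the unwinder so
// neither can be mid-operation at the instant of fork().
static void disableForFork() {
  PoolMutex.lock();
  BacktraceMutex.lock();
}

// Mirrors the patched enable(): runs after fork() in both parent and child.
static void enableAfterFork() {
  PoolMutex.unlock();
  BacktraceMutex.unlock();
}

void installAtForkSketch() {
  // prepare = disable, parent = enable, child = enable.
  pthread_atfork(disableForFork, enableAfterFork, enableAfterFork);
}
```

Because allocate() and deallocate() take BacktraceMutex only for the duration of RecordBacktrace(), the prepare handler's acquisition of that lock is enough to guarantee no thread is inside the unwinder when the fork proceeds.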