author    Mitch Phillips <31459023+hctim@users.noreply.github.com>  2021-02-02 14:32:28 -0800
committer Mitch Phillips <mitchp@google.com>  2021-02-10 10:35:36 -0800
commit    a6921b6e8c94531b3e49ce7df6d59f20a8fd20eb (patch)
tree      1eb42fa95b5f13314b2fac5ee9f02f4dd307f55b /gwp_asan
parent    89447be18b6414c84693c39498e1dcd231dfcb9e (diff)
download  gwp_asan-a6921b6e8c94531b3e49ce7df6d59f20a8fd20eb.tar.gz
[GWP-ASan] Add locking around unwinder for atfork protection.
Unwinders (like libc's backtrace()) can take their own locks (like the libdl lock). We need to let the unwinder release those locks before forking, so wrap a new lock around the unwinder for atfork protection.

Reviewed By: eugenis
Differential Revision: https://reviews.llvm.org/D95889
GitOrigin-RevId: 30973f6fe01cc0a9624147466f0c54b91a1b61d7
Change-Id: Ie6954087f30ca478843c7d69b6e3282ba5f7a2a4
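For context, init() calls installAtFork() (visible in the first hunk below) to register fork handlers that quiesce the allocator around fork() by calling disable() before the fork and enable() afterwards in parent and child. The installAtFork() body is not part of this diff; below is a minimal standalone sketch of that pattern, assuming the usual pthread_atfork() mechanism. The mutexes, prepareForFork(), resumeAfterFork(), and installAtForkSketch() are illustrative stand-ins, not GWP-ASan's actual members or its real installAtFork().

// Sketch only: illustrates the atfork pattern this patch extends.
// The prepare hook must acquire every lock the allocator (and, after this
// patch, the unwinder) may hold, so a forked child never inherits a lock
// that was held mid-operation.
#include <mutex>
#include <pthread.h>
#include <unistd.h>

static std::mutex PoolMutex;      // stand-in for GWP-ASan's PoolMutex
static std::mutex BacktraceMutex; // stand-in for the new BacktraceMutex

static void prepareForFork() {    // runs in the parent right before fork()
  PoolMutex.lock();
  BacktraceMutex.lock();
}

static void resumeAfterFork() {   // runs in parent and child after fork()
  PoolMutex.unlock();
  BacktraceMutex.unlock();
}

static void installAtForkSketch() {
  pthread_atfork(prepareForFork, resumeAfterFork, resumeAfterFork);
}

int main() {
  installAtForkSketch();
  if (fork() == 0) // child starts with both stand-in mutexes released
    _exit(0);
  return 0;
}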
Diffstat (limited to 'gwp_asan')
-rw-r--r--  gwp_asan/guarded_pool_allocator.cpp | 16 +++++++++++++---
-rw-r--r--  gwp_asan/guarded_pool_allocator.h   |  4 ++++
2 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/gwp_asan/guarded_pool_allocator.cpp b/gwp_asan/guarded_pool_allocator.cpp
index 86304d9..5e3455e 100644
--- a/gwp_asan/guarded_pool_allocator.cpp
+++ b/gwp_asan/guarded_pool_allocator.cpp
@@ -103,9 +103,15 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
   installAtFork();
 }
 
-void GuardedPoolAllocator::disable() { PoolMutex.lock(); }
+void GuardedPoolAllocator::disable() {
+  PoolMutex.lock();
+  BacktraceMutex.lock();
+}
 
-void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }
+void GuardedPoolAllocator::enable() {
+  PoolMutex.unlock();
+  BacktraceMutex.unlock();
+}
 
 void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                    void *Arg) {
@@ -232,7 +238,10 @@ void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
                 roundUpTo(Size, PageSize));
 
   Meta->RecordAllocation(UserPtr, Size);
-  Meta->AllocationTrace.RecordBacktrace(Backtrace);
+  {
+    ScopedLock UL(BacktraceMutex);
+    Meta->AllocationTrace.RecordBacktrace(Backtrace);
+  }
 
   return reinterpret_cast<void *>(UserPtr);
 }
@@ -281,6 +290,7 @@ void GuardedPoolAllocator::deallocate(void *Ptr) {
     // otherwise non-reentrant unwinders may deadlock.
     if (!getThreadLocals()->RecursiveGuard) {
      ScopedRecursiveGuard SRG;
+      ScopedLock UL(BacktraceMutex);
       Meta->DeallocationTrace.RecordBacktrace(Backtrace);
     }
   }
diff --git a/gwp_asan/guarded_pool_allocator.h b/gwp_asan/guarded_pool_allocator.h
index 86521f9..26a4599 100644
--- a/gwp_asan/guarded_pool_allocator.h
+++ b/gwp_asan/guarded_pool_allocator.h
@@ -196,6 +196,10 @@ private:
   // A mutex to protect the guarded slot and metadata pool for this class.
   Mutex PoolMutex;
+  // Some unwinders can grab the libdl lock. In order to provide atfork
+  // protection, we need to ensure that we allow an unwinding thread to release
+  // the libdl lock before forking.
+  Mutex BacktraceMutex;
   // Record the number allocations that we've sampled. We store this amount so
   // that we don't randomly choose to recycle a slot that previously had an
   // allocation before all the slots have been utilised.
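
To make the failure mode described by the new BacktraceMutex comment concrete: if fork() happens while another thread is inside an unwinder that has grabbed libdl's internal lock, the child inherits that lock in its locked state and can hang the next time it needs it. Below is a hypothetical repro sketch of that class of bug (not part of this patch), using glibc's backtrace() as the example unwinder.

// Hypothetical repro sketch: fork() racing against a thread that is busy
// unwinding. Without atfork protection around the unwinder, the child can
// inherit libdl's lock in a locked state and block inside backtrace().
#include <execinfo.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <atomic>
#include <thread>

int main() {
  std::atomic<bool> Stop{false};

  // Keep one thread unwinding continuously; backtrace() may take libdl's
  // internal lock while it resolves frames.
  std::thread Unwinder([&Stop] {
    void *Frames[32];
    while (!Stop.load())
      backtrace(Frames, 32);
  });

  for (int I = 0; I < 1000; ++I) {
    pid_t Pid = fork();
    if (Pid == 0) {
      // Child: if the fork landed while the unwinder thread held the lock,
      // this call can block forever on the inherited, never-unlocked mutex.
      void *Frames[32];
      backtrace(Frames, 32);
      _exit(0);
    }
    waitpid(Pid, nullptr, 0); // parent hangs here if the child deadlocks
  }

  Stop.store(true);
  Unwinder.join();
  return 0;
}

With this patch, the atfork prepare path (disable()) takes BacktraceMutex, and both RecordBacktrace() call sites in this diff run under the same mutex, so a fork can only proceed once no thread is mid-unwind inside GWP-ASan.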