author     Peter Collingbourne <peter@pcc.me.uk>  2021-05-12 15:29:41 -0700
committer  Peter Collingbourne <pcc@google.com>   2021-05-12 18:05:51 -0700
commit     185fd5671e8e5152f7f18fd8b1c4af0f709b74ae (patch)
tree       bff7475e69a9de7c3af688dfc7ad83e884ded09e
parent     3ae243e51a54dc7aa47b3dad541a8a3f885113cf (diff)
download   scudo-185fd5671e8e5152f7f18fd8b1c4af0f709b74ae.tar.gz
scudo: Require fault address to be in bounds for UAF.
The bounds check that we previously had here was suitable for secondary
allocations but not for UAF on primary allocations, where it is likely to
result in false positives. Fix it by using a different bounds check for UAF
that requires the fault address to be in bounds.

Differential Revision: https://reviews.llvm.org/D102376

GitOrigin-RevId: 6732a5328cf03872d53827d3e0e283fcf16b551a
Change-Id: I895708b953bf41ad8144f23c965ce8d9563aa226
-rw-r--r--  standalone/combined.h  31
1 file changed, 22 insertions, 9 deletions
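For context, a minimal standalone sketch of the two bounds checks this change distinguishes (simplified C++, not the actual scudo code; FaultMatchesEntry, the Deallocated flag, and the PageSize parameter are illustrative names, not identifiers from the patch): a fault is attributed to a deallocated ring buffer entry only if it lands inside the old allocation, while a live (secondary) entry accepts up to a page of slack on either side, matching its guard regions.

#include <cstdint>

using uptr = uintptr_t;

// Sketch of the attribution logic: should FaultAddr be reported against
// this ring buffer entry?
bool FaultMatchesEntry(uptr FaultAddr, uptr EntryPtr, uptr EntrySize,
                       bool Deallocated, uptr PageSize) {
  if (Deallocated) {
    // Use-after-free: require the fault address to be strictly in bounds.
    // Out-of-bounds UAF is rare, and matching it would mostly yield
    // false positives.
    return FaultAddr >= EntryPtr && FaultAddr < EntryPtr + EntrySize;
  }
  // Live entry: only secondary allocations can produce a ring buffer OOB
  // report, and they have at least a page of guard region on each side,
  // so accept up to a page of slack.
  return FaultAddr >= EntryPtr - PageSize &&
         FaultAddr < EntryPtr + EntrySize + PageSize;
}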
diff --git a/standalone/combined.h b/standalone/combined.h
index 529c042633f..70aeaa8ab7a 100644
--- a/standalone/combined.h
+++ b/standalone/combined.h
@@ -1333,22 +1333,35 @@ private:
--I) {
auto *Entry = &RingBuffer->Entries[I % AllocationRingBuffer::NumEntries];
uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
- uptr UntaggedEntryPtr = untagPointer(EntryPtr);
- uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
- if (!EntryPtr || FaultAddr < EntryPtr - getPageSizeCached() ||
- FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
+ if (!EntryPtr)
continue;
+ uptr UntaggedEntryPtr = untagPointer(EntryPtr);
+ uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);
- // For UAF the ring buffer will contain two entries, one for the
- // allocation and another for the deallocation. Don't report buffer
- // overflow/underflow using the allocation entry if we have already
- // collected a report from the deallocation entry.
- if (!DeallocationTrace) {
+ if (DeallocationTid) {
+ // For UAF we only consider in-bounds fault addresses because
+ // out-of-bounds UAF is rare and attempting to detect it is very likely
+ // to result in false positives.
+ if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
+ continue;
+ } else {
+ // Ring buffer OOB is only possible with secondary allocations. In this
+ // case we are guaranteed a guard region of at least a page on either
+ // side of the allocation (guard page on the right, guard page + tagged
+ // region on the left), so ignore any faults outside of that range.
+ if (FaultAddr < EntryPtr - getPageSizeCached() ||
+ FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
+ continue;
+
+ // For UAF the ring buffer will contain two entries, one for the
+ // allocation and another for the deallocation. Don't report buffer
+ // overflow/underflow using the allocation entry if we have already
+ // collected a report from the deallocation entry.
bool Found = false;
for (uptr J = 0; J != NextErrorReport; ++J) {
if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {