author     Peter Collingbourne <peter@pcc.me.uk>  2021-05-14 04:38:25 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2021-05-14 04:38:25 +0000
commit     3cd2ce3aa70bcf2695bc9e16c234a7a6000cae59 (patch)
tree       3c141c900d5f5ccbc9e318050e373fe75f799e63
parent     bccf0d417b0526018a82115fed3e9d64b41d74d8 (diff)
parent     c727119a49d1d6b6ef55e6fc4003d95dc3929f34 (diff)
scudo: Fix MTE error reporting for zero-sized allocations. am: c727119a49

Original change: https://android-review.googlesource.com/c/platform/external/scudo/+/1707708
Change-Id: I1c4aa6c5fbf60bfd5271a2abbb4938152acb6d7f
-rw-r--r--  standalone/combined.h | 52
1 file changed, 34 insertions(+), 18 deletions(-)
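
For context on the change below: on AArch64 MTE, each 16-byte granule of memory carries a 4-bit tag, and a matching logical tag is carried in bits 59:56 of the pointer. A minimal host-side sketch of that layout, mirroring the helper names used in the diff (untagPointer, extractTag) but assuming nothing about scudo's real implementation:

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;
using u8 = uint8_t;

// Place a 4-bit logical tag in pointer bits 59:56, as MTE does.
constexpr uptr addTag(uptr Ptr, u8 Tag) {
  return (Ptr & ~(uptr(0xf) << 56)) | (uptr(Tag) << 56);
}
constexpr u8 extractTag(uptr Ptr) { return (Ptr >> 56) & 0xf; }
// Clear the whole top byte, leaving the untagged address.
constexpr uptr untagPointer(uptr Ptr) { return Ptr & ~(uptr(0xff) << 56); }

int main() {
  uptr Chunk = addTag(0x10000, 0xa); // a tagged user pointer
  assert(extractTag(Chunk) == 0xa);
  assert(untagPointer(Chunk) == 0x10000);
  // A zero-sized allocation spans no granules, so tagging it writes nothing
  // to the memory tag space -- the gap this change closes.
  return 0;
}

Because tagging covers the granules of the allocation itself, a zero-sized allocation stores no memory tags at all, which is what left getInlineErrorInfo() unable to match a fault address back to such a chunk.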
diff --git a/standalone/combined.h b/standalone/combined.h
index 95fc8d9bbbe..8080d677d7b 100644
--- a/standalone/combined.h
+++ b/standalone/combined.h
@@ -437,7 +437,7 @@ public:
if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
PrevEnd = NextPage;
TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
- resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, BlockEnd);
+ resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
// If an allocation needs to be zeroed (i.e. calloc) we can normally
// avoid zeroing the memory now since we can rely on memory having
@@ -643,7 +643,7 @@ public:
if (ClassId) {
resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
- BlockEnd);
+ NewSize, BlockEnd);
storePrimaryAllocationStackMaybe(Options, OldPtr);
} else {
storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
@@ -1143,6 +1143,27 @@ private:
return Offset + Chunk::getHeaderSize();
}
+ // Set the tag of the granule past the end of the allocation to 0, to catch
+ // linear overflows even if a previous larger allocation used the same block
+ // and tag. Only do this if the granule past the end is in our block, because
+ // this would otherwise lead to a SEGV if the allocation covers the entire
+ // block and our block is at the end of a mapping. The tag of the next block's
+ // header granule will be set to 0, so it will serve the purpose of catching
+ // linear overflows in this case.
+ //
+ // For allocations of size 0 we do not end up storing the address tag to the
+ // memory tag space, which getInlineErrorInfo() normally relies on to match
+ // address tags against chunks. To allow matching in this case we store the
+ // address tag in the first byte of the chunk.
+ void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
+ uptr UntaggedEnd = untagPointer(End);
+ if (UntaggedEnd != BlockEnd) {
+ storeTag(UntaggedEnd);
+ if (Size == 0)
+ *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
+ }
+ }
+
void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
uptr BlockEnd) {
// Prepare the granule before the chunk to store the chunk header by setting
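
The storeEndMarker() added in the hunk above has two jobs, and both can be modeled on the host: zero-tagging the granule past the chunk so linear overflows fault, and, for Size == 0, stashing the pointer tag in the chunk's first byte (for a zero-sized chunk, End is the chunk start itself). A sketch under the same assumptions as the earlier one (16-byte granules, tag in bits 59:56), with maps standing in for the tag space and memory:

#include <cstdint>
#include <map>

using uptr = uintptr_t;
using u8 = uint8_t;

std::map<uptr, u8> MemoryTags;  // modeled MTE tag space: granule base -> tag
std::map<uptr, u8> MemoryBytes; // modeled ordinary memory contents

u8 extractTag(uptr Ptr) { return (Ptr >> 56) & 0xf; }
uptr untagPointer(uptr Ptr) { return Ptr & ~(uptr(0xff) << 56); }
void storeTag(uptr Untagged) { MemoryTags[Untagged] = 0; } // set granule tag 0

// Mirrors the storeEndMarker() added above, writing to the model rather than
// real memory. Tag 0 on the granule past the chunk makes a linear overflow
// through the chunk's tagged pointer fault; skipping the store when that
// granule is outside the block avoids the SEGV described in the comment.
void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
  uptr UntaggedEnd = untagPointer(End);
  if (UntaggedEnd != BlockEnd) {
    storeTag(UntaggedEnd);
    if (Size == 0) // End == chunk start here: stash the tag in its first byte
      MemoryBytes[UntaggedEnd] = extractTag(End);
  }
}

int main() {
  uptr Chunk = (uptr(0xa) << 56) | 0x10000; // tagged pointer, Size == 0 chunk
  storeEndMarker(Chunk, /*Size=*/0, /*BlockEnd=*/0x10040);
  // Now MemoryTags[0x10000] == 0 and MemoryBytes[0x10000] == 0xa.
  return 0;
}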
@@ -1155,25 +1176,17 @@ private:
uptr TaggedBegin, TaggedEnd;
setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
- // Finally, set the tag of the granule past the end of the allocation to 0,
- // to catch linear overflows even if a previous larger allocation used the
- // same block and tag. Only do this if the granule past the end is in our
- // block, because this would otherwise lead to a SEGV if the allocation
- // covers the entire block and our block is at the end of a mapping. The tag
- // of the next block's header granule will be set to 0, so it will serve the
- // purpose of catching linear overflows in this case.
- uptr UntaggedEnd = untagPointer(TaggedEnd);
- if (UntaggedEnd != BlockEnd)
- storeTag(UntaggedEnd);
+ storeEndMarker(TaggedEnd, Size, BlockEnd);
return reinterpret_cast<void *>(TaggedBegin);
}
- void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
+ void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
+ uptr BlockEnd) {
uptr RoundOldPtr = roundUpTo(OldPtr, archMemoryTagGranuleSize());
uptr RoundNewPtr;
if (RoundOldPtr >= NewPtr) {
// If the allocation is shrinking we just need to set the tag past the end
- // of the allocation to 0. See explanation in prepareTaggedChunk above.
+ // of the allocation to 0. See explanation in storeEndMarker() above.
RoundNewPtr = roundUpTo(NewPtr, archMemoryTagGranuleSize());
} else {
// Set the memory tag of the region
@@ -1181,10 +1194,7 @@ private:
// to the pointer tag stored in OldPtr.
RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
}
-
- uptr UntaggedNewPtr = untagPointer(RoundNewPtr);
- if (UntaggedNewPtr != BlockEnd)
- storeTag(UntaggedNewPtr);
+ storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
}
void storePrimaryAllocationStackMaybe(Options Options, void *Ptr) {
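
The resizeTaggedChunk() logic in the two hunks above hinges on rounding the old end pointer up to a granule boundary: if that rounded pointer is already at or past the new end, the allocation shrank within its tagged granules and only the end marker has to move; otherwise the granules between the two ends are retagged via storeTags(). A small sketch of the arithmetic, assuming archMemoryTagGranuleSize() is 16:

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

// The usual power-of-two round-up; Boundary must be a power of two.
constexpr uptr roundUpTo(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

int main() {
  constexpr uptr Granule = 16; // assumed archMemoryTagGranuleSize()
  // Shrink: old end 0x1030, new end 0x1010. RoundOldPtr (0x1030) >= NewPtr,
  // so no granules are retagged; only the end marker moves to 0x1010.
  assert(roundUpTo(0x1030, Granule) >= uptr(0x1010));
  // Grow: old end 0x1012, new end 0x1040. Granules from RoundOldPtr (0x1020)
  // up to the new end take the chunk's tag via storeTags().
  assert(roundUpTo(0x1012, Granule) == 0x1020);
  return 0;
}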
@@ -1293,6 +1303,12 @@ private:
*Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
ChunkBegin - Chunk::getHeaderSize());
*Data = reinterpret_cast<const u32 *>(ChunkBegin);
+
+ // Allocations of size 0 will have stashed the tag in the first byte of
+ // the chunk, see storeEndMarker().
+ if (Header->SizeOrUnusedBytes == 0)
+ *Tag = static_cast<u8>(*ChunkBegin);
+
return true;
};
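
The last hunk is the consumer of the stashed byte. A hypothetical sketch of the matching step, with illustrative names (matchesChunk, FaultTag) that are not scudo's actual getInlineErrorInfo() code: for a non-empty chunk the stored memory tag identifies the chunk, while for SizeOrUnusedBytes == 0 the tag is read back from the first byte of the chunk where storeEndMarker() stashed it:

#include <cstdint>

using u8 = uint8_t;
using u32 = uint32_t;

// Reduced stand-in for Chunk::UnpackedHeader; only the field used here.
struct UnpackedHeader {
  u32 SizeOrUnusedBytes;
};

// MemoryTag is the tag stored for the chunk's first granule; FaultTag comes
// from the faulting address. A zero-sized chunk never stored a memory tag,
// so the byte stashed at the chunk start is used instead.
bool matchesChunk(const UnpackedHeader &Header, u8 MemoryTag,
                  const char *ChunkBegin, u8 FaultTag) {
  u8 ChunkTag = MemoryTag;
  if (Header.SizeOrUnusedBytes == 0)
    ChunkTag = static_cast<u8>(*ChunkBegin);
  return ChunkTag == FaultTag;
}

int main() {
  char Chunk[16] = {0x0a}; // first byte holds the stashed tag 0xa
  UnpackedHeader H{0};     // SizeOrUnusedBytes == 0
  return matchesChunk(H, /*MemoryTag=*/0, Chunk, /*FaultTag=*/0xa) ? 0 : 1;
}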