-rw-r--r--  Android.bp                       13
-rw-r--r--  config/config_build_check.cpp     7
-rw-r--r--  config/custom_scudo_config.h     12
-rw-r--r--  standalone/combined.h           371
4 files changed, 236 insertions, 167 deletions
diff --git a/Android.bp b/Android.bp
index 2067b271e72..88435b917bc 100644
--- a/Android.bp
+++ b/Android.bp
@@ -71,9 +71,6 @@ cc_defaults {
cflags: [
// Use a custom Android configuration.
"-DSCUDO_USE_CUSTOM_CONFIG",
-
- // Enable the svelte config by default.
- "-DSVELTE_ENABLED",
],
include_dirs: [
@@ -81,8 +78,8 @@ cc_defaults {
],
product_variables: {
- malloc_not_svelte: {
- cflags: ["-USVELTE_ENABLED"],
+ malloc_low_memory: {
+ cflags: ["-DSCUDO_LOW_MEMORY"],
},
},
}
@@ -347,15 +344,13 @@ cc_defaults {
host_supported: true,
srcs: ["config/config_build_check.cpp"],
- cflags: ["-DSVELTE_CHECK"],
-
include_dirs: [
"external/scudo/standalone",
],
product_variables: {
- malloc_not_svelte: {
- cflags: ["-USVELTE_CHECK"],
+ malloc_low_memory: {
+ cflags: ["-DSCUDO_LOW_MEMORY_CHECK"],
},
},
}
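A note on the flag flip above: the old scheme defined SVELTE_ENABLED for every build and had non-svelte products cancel it with -USVELTE_ENABLED, while the new scheme defines nothing by default and only low-memory products pass -DSCUDO_LOW_MEMORY, making the normal config the true default. A minimal sketch of how such a -D flag surfaces on the C++ side (generic example, not the scudo headers themselves):

    // Build two ways to see both sides of the product_variables wiring:
    //   normal build:      c++ -o cfg cfg.cpp && ./cfg
    //   low-memory build:  c++ -DSCUDO_LOW_MEMORY -o cfg cfg.cpp && ./cfg
    #include <cstdio>

    int main() {
    #if defined(SCUDO_LOW_MEMORY)
      std::printf("low-memory config selected\n");
    #else
      std::printf("normal config selected\n");
    #endif
      return 0;
    }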
diff --git a/config/config_build_check.cpp b/config/config_build_check.cpp
index 00eff3c0dc0..3f97fc7514e 100644
--- a/config/config_build_check.cpp
+++ b/config/config_build_check.cpp
@@ -33,9 +33,10 @@
#include "allocator_config.h"
-#if defined(SVELTE_CHECK)
-static_assert(std::is_same<scudo::Config, scudo::AndroidSvelteConfig>() == true,
- "Svelte is enabled, but AndroidSvelteConfig is not the default");
+#if defined(SCUDO_LOW_MEMORY_CHECK)
+static_assert(
+ std::is_same<scudo::Config, scudo::AndroidLowMemoryConfig>() == true,
+ "Low Memory is enabled, but AndroidLowMemoryConfig is not the default");
#else
static_assert(std::is_same<scudo::Config, scudo::AndroidNormalConfig>() == true,
"Not using AndrodNormalConfig as the default");
diff --git a/config/custom_scudo_config.h b/config/custom_scudo_config.h
index 5e85d0fe1a7..a0fa59230ce 100644
--- a/config/custom_scudo_config.h
+++ b/config/custom_scudo_config.h
@@ -107,8 +107,9 @@ struct AndroidNormalConfig {
static const uptr GroupSizeLog = 18U;
typedef uptr CompactPtrT;
#endif
- static const s32 MinReleaseToOsIntervalMs = 1000;
+ static const s32 MinReleaseToOsIntervalMs = -1;
static const s32 MaxReleaseToOsIntervalMs = 1000;
+ static const s32 DefaultReleaseToOsIntervalMs = 1000;
};
#if SCUDO_CAN_USE_PRIMARY64
template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
@@ -122,8 +123,9 @@ struct AndroidNormalConfig {
static const u32 QuarantineSize = 32U;
static const u32 DefaultMaxEntriesCount = 32U;
static const uptr DefaultMaxEntrySize = 2UL << 20;
- static const s32 MinReleaseToOsIntervalMs = 0;
+ static const s32 MinReleaseToOsIntervalMs = -1;
static const s32 MaxReleaseToOsIntervalMs = 1000;
+ static const s32 DefaultReleaseToOsIntervalMs = 0;
};
template <typename Config> using CacheT = MapAllocatorCache<Config>;
};
@@ -131,7 +133,7 @@ struct AndroidNormalConfig {
template <typename Config> using SecondaryT = MapAllocator<Config>;
};
-struct AndroidSvelteConfig {
+struct AndroidLowMemoryConfig {
#if defined(__aarch64__)
static const bool MaySupportMemoryTagging = true;
#else
@@ -171,8 +173,8 @@ struct AndroidSvelteConfig {
template <typename Config> using SecondaryT = MapAllocator<Config>;
};
-#if defined(SVELTE_ENABLED)
-typedef AndroidSvelteConfig Config;
+#if defined(SCUDO_LOW_MEMORY)
+typedef AndroidLowMemoryConfig Config;
#else
typedef AndroidNormalConfig Config;
#endif
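On the interval changes above: lowering MinReleaseToOsIntervalMs to -1 while adding DefaultReleaseToOsIntervalMs keeps the old startup behavior (1000 ms for the primary, 0 ms for the secondary cache, presumably via the new Default field) but widens the range a runtime caller may select, so releasing to the OS can now be disabled outright (scudo treats a negative interval as "never release automatically"). A rough sketch of the clamp this implies, with a hypothetical setReleaseInterval() standing in for scudo's setOption() plumbing:

    #include <algorithm>
    #include <cassert>

    // Assumed values mirroring the AndroidNormalConfig primary above.
    constexpr int MinReleaseToOsIntervalMs = -1;   // negative: never release
    constexpr int MaxReleaseToOsIntervalMs = 1000;
    constexpr int DefaultReleaseToOsIntervalMs = 1000;

    // Sketch: a runtime-requested interval is clamped into [Min, Max]; the
    // Default is what the allocator starts with before anyone changes it.
    int setReleaseInterval(int RequestedMs) {
      return std::max(MinReleaseToOsIntervalMs,
                      std::min(RequestedMs, MaxReleaseToOsIntervalMs));
    }

    int main() {
      assert(setReleaseInterval(-1) == -1);     // now representable: disabled
      assert(setReleaseInterval(5000) == 1000); // still capped at one second
      assert(DefaultReleaseToOsIntervalMs == 1000);
      return 0;
    }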
diff --git a/standalone/combined.h b/standalone/combined.h
index 927513dea92..15a199ae034 100644
--- a/standalone/combined.h
+++ b/standalone/combined.h
@@ -410,133 +410,18 @@ public:
reportOutOfMemory(NeededSize);
}
- const uptr BlockUptr = reinterpret_cast<uptr>(Block);
- const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
- const uptr UserPtr = roundUp(UnalignedUserPtr, Alignment);
-
- void *Ptr = reinterpret_cast<void *>(UserPtr);
- void *TaggedPtr = Ptr;
- if (LIKELY(ClassId)) {
- // We only need to zero or tag the contents for Primary backed
- // allocations. We only set tags for primary allocations in order to avoid
- // faulting potentially large numbers of pages for large secondary
- // allocations. We assume that guard pages are enough to protect these
- // allocations.
- //
- // FIXME: When the kernel provides a way to set the background tag of a
- // mapping, we should be able to tag secondary allocations as well.
- //
- // When memory tagging is enabled, zeroing the contents is done as part of
- // setting the tag.
- if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
- uptr PrevUserPtr;
- Chunk::UnpackedHeader Header;
- const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
- const uptr BlockEnd = BlockUptr + BlockSize;
- // If possible, try to reuse the UAF tag that was set by deallocate().
- // For simplicity, only reuse tags if we have the same start address as
- // the previous allocation. This handles the majority of cases since
- // most allocations will not be more aligned than the minimum alignment.
- //
- // We need to handle situations involving reclaimed chunks, and retag
- // the reclaimed portions if necessary. In the case where the chunk is
- // fully reclaimed, the chunk's header will be zero, which will trigger
- // the code path for new mappings and invalid chunks that prepares the
- // chunk from scratch. There are three possibilities for partial
- // reclaiming:
- //
- // (1) Header was reclaimed, data was partially reclaimed.
- // (2) Header was not reclaimed, all data was reclaimed (e.g. because
- // data started on a page boundary).
- // (3) Header was not reclaimed, data was partially reclaimed.
- //
- // Case (1) will be handled in the same way as for full reclaiming,
- // since the header will be zero.
- //
- // We can detect case (2) by loading the tag from the start
- // of the chunk. If it is zero, it means that either all data was
- // reclaimed (since we never use zero as the chunk tag), or that the
- // previous allocation was of size zero. Either way, we need to prepare
- // a new chunk from scratch.
- //
- // We can detect case (3) by moving to the next page (if covered by the
- // chunk) and loading the tag of its first granule. If it is zero, it
- // means that all following pages may need to be retagged. On the other
- // hand, if it is nonzero, we can assume that all following pages are
- // still tagged, according to the logic that if any of the pages
- // following the next page were reclaimed, the next page would have been
- // reclaimed as well.
- uptr TaggedUserPtr;
- if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
- PrevUserPtr == UserPtr &&
- (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
- uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
- const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
- if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
- PrevEnd = NextPage;
- TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
- resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
- if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
- // If an allocation needs to be zeroed (i.e. calloc) we can normally
- // avoid zeroing the memory now since we can rely on memory having
- // been zeroed on free, as this is normally done while setting the
- // UAF tag. But if tagging was disabled per-thread when the memory
- // was freed, it would not have been retagged and thus zeroed, and
- // therefore it needs to be zeroed now.
- memset(TaggedPtr, 0,
- Min(Size, roundUp(PrevEnd - TaggedUserPtr,
- archMemoryTagGranuleSize())));
- } else if (Size) {
- // Clear any stack metadata that may have previously been stored in
- // the chunk data.
- memset(TaggedPtr, 0, archMemoryTagGranuleSize());
- }
- } else {
- const uptr OddEvenMask =
- computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
- TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
- }
- storePrimaryAllocationStackMaybe(Options, Ptr);
- } else {
- Block = addHeaderTag(Block);
- Ptr = addHeaderTag(Ptr);
- if (UNLIKELY(FillContents != NoFill)) {
- // This condition is not necessarily unlikely, but since memset is
- // costly, we might as well mark it as such.
- memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
- PrimaryT::getSizeByClassId(ClassId));
- }
- }
- } else {
- Block = addHeaderTag(Block);
- Ptr = addHeaderTag(Ptr);
- if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
- storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
- storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
- }
+ const uptr UserPtr = roundUp(
+ reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize(), Alignment);
+ const uptr SizeOrUnusedBytes =
+ ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size);
+
+ if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
+ return initChunk(ClassId, Origin, Block, UserPtr, SizeOrUnusedBytes,
+ FillContents);
}
- Chunk::UnpackedHeader Header = {};
- if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
- const uptr Offset = UserPtr - UnalignedUserPtr;
- DCHECK_GE(Offset, 2 * sizeof(u32));
- // The BlockMarker has no security purpose, but is specifically meant for
- // the chunk iteration function that can be used in debugging situations.
- // It is the only situation where we have to locate the start of a chunk
- // based on its block address.
- reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
- reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
- Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
- }
- Header.ClassId = ClassId & Chunk::ClassIdMask;
- Header.State = Chunk::State::Allocated;
- Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
- Header.SizeOrUnusedBytes =
- (ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) &
- Chunk::SizeOrUnusedBytesMask;
- Chunk::storeHeader(Cookie, Ptr, &Header);
-
- return TaggedPtr;
+ return initChunkWithMemoryTagging(ClassId, Origin, Block, UserPtr, Size,
+ SizeOrUnusedBytes, FillContents);
}
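The rewritten tail of allocate() computes UserPtr and SizeOrUnusedBytes once and then branches: the common non-MTE case goes to an ALWAYS_INLINE helper, the memory-tagging case to a NOINLINE one, keeping the cold tagging code out of the inlined hot path. A generic sketch of that hot/cold split (attribute macros and names are illustrative, not scudo's):

    #include <cstdio>

    #define ALWAYS_INLINE inline __attribute__((always_inline))
    #define NOINLINE __attribute__((noinline))

    // Cold path: kept out of line so the caller's fast path stays small.
    static NOINLINE int initSlow(int X) {
      std::printf("slow path\n");
      return X * 2;
    }

    // Hot path: forced inline into the caller.
    static ALWAYS_INLINE int initFast(int X) { return X + 1; }

    static int allocateLike(int X, bool UseSlowFeature) {
      // One predictable branch selects which helper finishes the work.
      if (__builtin_expect(!UseSlowFeature, 1))
        return initFast(X);
      return initSlow(X);
    }

    int main() {
      std::printf("%d %d\n", allocateLike(3, false), allocateLike(3, true));
      return 0;
    }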
NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
@@ -1163,6 +1048,175 @@ private:
reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
}
+ ALWAYS_INLINE void *initChunk(const uptr ClassId, const Chunk::Origin Origin,
+ void *Block, const uptr UserPtr,
+ const uptr SizeOrUnusedBytes,
+ const FillContentsMode FillContents) {
+ Block = addHeaderTag(Block);
+ // Only do content fill when the allocation is backed by the primary
+ // allocator; the secondary allocator has already filled the content.
+ if (ClassId != 0 && UNLIKELY(FillContents != NoFill)) {
+ // This condition is not necessarily unlikely, but since memset is
+ // costly, we might as well mark it as such.
+ memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
+ PrimaryT::getSizeByClassId(ClassId));
+ }
+
+ Chunk::UnpackedHeader Header = {};
+
+ const uptr DefaultAlignedPtr =
+ reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
+ if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
+ const uptr Offset = UserPtr - DefaultAlignedPtr;
+ DCHECK_GE(Offset, 2 * sizeof(u32));
+ // The BlockMarker has no security purpose, but is specifically meant for
+ // the chunk iteration function that can be used in debugging situations.
+ // It is the only situation where we have to locate the start of a chunk
+ // based on its block address.
+ reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
+ reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
+ Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
+ }
+
+ Header.ClassId = ClassId & Chunk::ClassIdMask;
+ Header.State = Chunk::State::Allocated;
+ Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
+ Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
+ Chunk::storeHeader(Cookie, reinterpret_cast<void *>(addHeaderTag(UserPtr)),
+ &Header);
+
+ return reinterpret_cast<void *>(UserPtr);
+ }
+
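A worked example of the Offset encoding handled in initChunk() above, under the assumption of a 16-byte chunk header and MinAlignmentLog of 4 (typical 64-bit values); the numbers themselves are made up:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t HeaderSize = 16, MinAlignmentLog = 4; // assumed values
      const uint64_t Block = 0x1000, Alignment = 64;
      const uint64_t Unaligned = Block + HeaderSize;                // 0x1010
      const uint64_t UserPtr =
          (Unaligned + Alignment - 1) & ~(Alignment - 1);           // 0x1040
      const uint64_t Offset = UserPtr - Unaligned;                  // 48 bytes
      assert(Offset >= 2 * sizeof(uint32_t)); // room for marker + raw offset
      const uint64_t EncodedOffset = Offset >> MinAlignmentLog;     // 3
      assert(EncodedOffset == 3);
      // Deallocation can then walk back to the block start from the user
      // pointer: UserPtr - HeaderSize - (EncodedOffset << MinAlignmentLog).
      assert(UserPtr - HeaderSize - (EncodedOffset << MinAlignmentLog) == Block);
      return 0;
    }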
+ NOINLINE void *
+ initChunkWithMemoryTagging(const uptr ClassId, const Chunk::Origin Origin,
+ void *Block, const uptr UserPtr, const uptr Size,
+ const uptr SizeOrUnusedBytes,
+ const FillContentsMode FillContents) {
+ const Options Options = Primary.Options.load();
+ DCHECK(useMemoryTagging<AllocatorConfig>(Options));
+
+ void *Ptr = reinterpret_cast<void *>(UserPtr);
+ void *TaggedPtr = Ptr;
+
+ if (LIKELY(ClassId)) {
+ // Init the primary chunk.
+ //
+ // We only need to zero or tag the contents for Primary backed
+ // allocations. We only set tags for primary allocations in order to avoid
+ // faulting potentially large numbers of pages for large secondary
+ // allocations. We assume that guard pages are enough to protect these
+ // allocations.
+ //
+ // FIXME: When the kernel provides a way to set the background tag of a
+ // mapping, we should be able to tag secondary allocations as well.
+ //
+ // When memory tagging is enabled, zeroing the contents is done as part of
+ // setting the tag.
+
+ Chunk::UnpackedHeader Header;
+ const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
+ const uptr BlockUptr = reinterpret_cast<uptr>(Block);
+ const uptr BlockEnd = BlockUptr + BlockSize;
+ // If possible, try to reuse the UAF tag that was set by deallocate().
+ // For simplicity, only reuse tags if we have the same start address as
+ // the previous allocation. This handles the majority of cases since
+ // most allocations will not be more aligned than the minimum alignment.
+ //
+ // We need to handle situations involving reclaimed chunks, and retag
+ // the reclaimed portions if necessary. In the case where the chunk is
+ // fully reclaimed, the chunk's header will be zero, which will trigger
+ // the code path for new mappings and invalid chunks that prepares the
+ // chunk from scratch. There are three possibilities for partial
+ // reclaiming:
+ //
+ // (1) Header was reclaimed, data was partially reclaimed.
+ // (2) Header was not reclaimed, all data was reclaimed (e.g. because
+ // data started on a page boundary).
+ // (3) Header was not reclaimed, data was partially reclaimed.
+ //
+ // Case (1) will be handled in the same way as for full reclaiming,
+ // since the header will be zero.
+ //
+ // We can detect case (2) by loading the tag from the start
+ // of the chunk. If it is zero, it means that either all data was
+ // reclaimed (since we never use zero as the chunk tag), or that the
+ // previous allocation was of size zero. Either way, we need to prepare
+ // a new chunk from scratch.
+ //
+ // We can detect case (3) by moving to the next page (if covered by the
+ // chunk) and loading the tag of its first granule. If it is zero, it
+ // means that all following pages may need to be retagged. On the other
+ // hand, if it is nonzero, we can assume that all following pages are
+ // still tagged, according to the logic that if any of the pages
+ // following the next page were reclaimed, the next page would have been
+ // reclaimed as well.
+ uptr TaggedUserPtr;
+ uptr PrevUserPtr;
+ if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
+ PrevUserPtr == UserPtr &&
+ (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
+ uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
+ const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
+ if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
+ PrevEnd = NextPage;
+ TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
+ resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
+ if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
+ // If an allocation needs to be zeroed (i.e. calloc) we can normally
+ // avoid zeroing the memory now since we can rely on memory having
+ // been zeroed on free, as this is normally done while setting the
+ // UAF tag. But if tagging was disabled per-thread when the memory
+ // was freed, it would not have been retagged and thus zeroed, and
+ // therefore it needs to be zeroed now.
+ memset(TaggedPtr, 0,
+ Min(Size, roundUp(PrevEnd - TaggedUserPtr,
+ archMemoryTagGranuleSize())));
+ } else if (Size) {
+ // Clear any stack metadata that may have previously been stored in
+ // the chunk data.
+ memset(TaggedPtr, 0, archMemoryTagGranuleSize());
+ }
+ } else {
+ const uptr OddEvenMask =
+ computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
+ TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
+ }
+ storePrimaryAllocationStackMaybe(Options, Ptr);
+ } else {
+ // Init the secondary chunk.
+
+ Block = addHeaderTag(Block);
+ Ptr = addHeaderTag(Ptr);
+ storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
+ storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
+ }
+
+ Chunk::UnpackedHeader Header = {};
+
+ const uptr DefaultAlignedPtr =
+ reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
+ if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
+ const uptr Offset = UserPtr - DefaultAlignedPtr;
+ DCHECK_GE(Offset, 2 * sizeof(u32));
+ // The BlockMarker has no security purpose, but is specifically meant for
+ // the chunk iteration function that can be used in debugging situations.
+ // It is the only situation where we have to locate the start of a chunk
+ // based on its block address.
+ reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
+ reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
+ Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
+ }
+
+ Header.ClassId = ClassId & Chunk::ClassIdMask;
+ Header.State = Chunk::State::Allocated;
+ Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
+ Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
+ Chunk::storeHeader(Cookie, Ptr, &Header);
+
+ return TaggedPtr;
+ }
+
void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
Chunk::UnpackedHeader *Header,
uptr Size) NO_THREAD_SAFETY_ANALYSIS {
@@ -1177,31 +1231,23 @@ private:
Header->State = Chunk::State::Available;
else
Header->State = Chunk::State::Quarantined;
- Header->OriginOrWasZeroed = useMemoryTagging<AllocatorConfig>(Options) &&
- Header->ClassId &&
- !TSDRegistry.getDisableMemInit();
- Chunk::storeHeader(Cookie, Ptr, Header);
- if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
- u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
- storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
- if (Header->ClassId) {
- if (!TSDRegistry.getDisableMemInit()) {
- uptr TaggedBegin, TaggedEnd;
- const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
- Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
- Header->ClassId);
- // Exclude the previous tag so that immediate use after free is
- // detected 100% of the time.
- setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
- &TaggedEnd);
- }
- }
+ void *BlockBegin;
+ if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
+ Header->OriginOrWasZeroed = 0U;
+ if (BypassQuarantine && allocatorSupportsMemoryTagging<AllocatorConfig>())
+ Ptr = untagPointer(Ptr);
+ BlockBegin = getBlockBegin(Ptr, Header);
+ } else {
+ Header->OriginOrWasZeroed =
+ Header->ClassId && !TSDRegistry.getDisableMemInit();
+ BlockBegin =
+ retagBlock(Options, TaggedPtr, Ptr, Header, Size, BypassQuarantine);
}
+
+ Chunk::storeHeader(Cookie, Ptr, Header);
+
if (BypassQuarantine) {
- if (allocatorSupportsMemoryTagging<AllocatorConfig>())
- Ptr = untagPointer(Ptr);
- void *BlockBegin = getBlockBegin(Ptr, Header);
const uptr ClassId = Header->ClassId;
if (LIKELY(ClassId)) {
bool CacheDrained;
@@ -1216,9 +1262,6 @@ private:
if (CacheDrained)
Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
} else {
- if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options)))
- storeTags(reinterpret_cast<uptr>(BlockBegin),
- reinterpret_cast<uptr>(Ptr));
Secondary.deallocate(Options, BlockBegin);
}
} else {
@@ -1228,6 +1271,34 @@ private:
}
}
+ NOINLINE void *retagBlock(const Options &Options, void *TaggedPtr, void *&Ptr,
+ Chunk::UnpackedHeader *Header, const uptr Size,
+ bool BypassQuarantine) {
+ DCHECK(useMemoryTagging<AllocatorConfig>(Options));
+
+ const u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
+ storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
+ if (Header->ClassId && !TSDRegistry.getDisableMemInit()) {
+ uptr TaggedBegin, TaggedEnd;
+ const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
+ Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
+ Header->ClassId);
+ // Exclude the previous tag so that immediate use after free is
+ // detected 100% of the time.
+ setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
+ &TaggedEnd);
+ }
+
+ Ptr = untagPointer(Ptr);
+ void *BlockBegin = getBlockBegin(Ptr, Header);
+ if (BypassQuarantine && !Header->ClassId) {
+ storeTags(reinterpret_cast<uptr>(BlockBegin),
+ reinterpret_cast<uptr>(Ptr));
+ }
+
+ return BlockBegin;
+ }
+
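retagBlock() pulls the deallocation-time retagging out of quarantineOrDeallocateChunk(); passing OddEvenMask | (1UL << PrevTag) to setRandomTag() excludes the tag the block carried when it was freed, so a pointer reused immediately after free always faults. A toy model of that exclusion (a pseudo-random pick over 4-bit tags standing in for the MTE instructions):

    #include <cassert>
    #include <cstdint>
    #include <random>

    // Toy stand-in for choosing a 4-bit memory tag while skipping any tag
    // whose bit is set in ExcludeMask (hardware does this with IRG).
    uint8_t pickTagExcluding(uint16_t ExcludeMask, std::mt19937 &Rng) {
      std::uniform_int_distribution<int> Dist(0, 15);
      uint8_t Tag;
      do {
        Tag = static_cast<uint8_t>(Dist(Rng));
      } while (ExcludeMask & (1u << Tag));
      return Tag;
    }

    int main() {
      std::mt19937 Rng(42);
      const uint8_t PrevTag = 7; // tag the chunk had when it was freed
      for (int I = 0; I < 1000; ++I) {
        // Excluding 1 << PrevTag means the retagged chunk never keeps
        // PrevTag, so a dangling pointer still carrying it faults at once.
        assert(pickTagExcluding(1u << PrevTag, Rng) != PrevTag);
      }
      return 0;
    }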
bool getChunkFromBlock(uptr Block, uptr *Chunk,
Chunk::UnpackedHeader *Header) {
*Chunk =