From f810b5921dde57180956b9eadf39a3a2b8cb5855 Mon Sep 17 00:00:00 2001
From: Hidehiko Abe
Date: Wed, 21 Feb 2018 01:04:53 +0900
Subject: Re-uprev to r462023.

Previous uprevs didn't include several CLs. This re-uprevs to r462023.

cf) Missing CLs I found:
https://codereview.chromium.org/2556563002
https://codereview.chromium.org/2754483002
https://codereview.chromium.org/2171833002
https://codereview.chromium.org/2778183003
https://codereview.chromium.org/2500473002
https://codereview.chromium.org/2173523002
https://codereview.chromium.org/2666423002
https://codereview.chromium.org/2723423002
https://codereview.chromium.org/2789463002
https://codereview.chromium.org/2723083004
https://codereview.chromium.org/2637843002
https://codereview.chromium.org/2785943004
https://codereview.chromium.org/2657603004
https://codereview.chromium.org/2774363003
https://codereview.chromium.org/2776853002
https://codereview.chromium.org/2736053003
https://codereview.chromium.org/2779413002
https://codereview.chromium.org/2782503002
https://codereview.chromium.org/2782083003
https://codereview.chromium.org/2399213005
https://codereview.chromium.org/2787383002
https://codereview.chromium.org/2790523004
https://codereview.chromium.org/2787533002
https://codereview.chromium.org/2780983003
https://codereview.chromium.org/2790403003
https://codereview.chromium.org/2747673002
https://codereview.chromium.org/2778173003
https://codereview.chromium.org/2788613004
https://codereview.chromium.org/2781983003
https://codereview.chromium.org/2774223003

Bug: 73270448
Test: Built and ran libchrome_test locally. Ran treehugger.
Change-Id: I5e76096d4fcf660571275cce5f4a980a8bb574fe
---
 .../heap_profiler_allocation_register.h | 79 +++++++++++++---------
 1 file changed, 48 insertions(+), 31 deletions(-)

diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index d6a02faeae..ac9872f001 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -48,24 +48,26 @@ class FixedHashMap {
   // For implementation simplicity API uses integer index instead
   // of iterators. Most operations (except Find) on KVIndex are O(1).
   using KVIndex = size_t;
-  static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+  enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) };
 
   // Capacity controls how many items this hash map can hold, and largely
   // affects memory footprint.
-  FixedHashMap(size_t capacity)
-      : num_cells_(capacity),
-        cells_(static_cast<Cell*>(
-            AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
-        buckets_(static_cast<Bucket*>(
-            AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
-        free_list_(nullptr),
-        next_unused_cell_(0) {}
+  explicit FixedHashMap(size_t capacity)
+      : num_cells_(capacity),
+        num_inserts_dropped_(0),
+        cells_(static_cast<Cell*>(
+            AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+        buckets_(static_cast<Bucket*>(
+            AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+        free_list_(nullptr),
+        next_unused_cell_(0) {}
 
   ~FixedHashMap() {
     FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
     FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
   }
 
+  // Returns {kInvalidKVIndex, false} if the table is full.
   std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
     Cell** p_cell = Lookup(key);
     Cell* cell = *p_cell;
@@ -74,7 +76,15 @@ class FixedHashMap {
     }
 
     // Get a free cell and link it.
-    *p_cell = cell = GetFreeCell();
+    cell = GetFreeCell();
+    if (!cell) {
+      if (num_inserts_dropped_ <
+          std::numeric_limits<size_t>::max()) {
+        ++num_inserts_dropped_;
+      }
+      return {kInvalidKVIndex, false};
+    }
+    *p_cell = cell;
 
     cell->p_prev = p_cell;
     cell->next = nullptr;
@@ -137,6 +147,8 @@ class FixedHashMap {
            bits::Align(sizeof(Bucket) * NumBuckets, page_size);
   }
 
+  size_t num_inserts_dropped() const { return num_inserts_dropped_; }
+
  private:
   friend base::trace_event::AllocationRegisterTest;
 
@@ -175,7 +187,8 @@ class FixedHashMap {
   }
 
   // Returns a cell that is not being used to store an entry (either by
-  // recycling from the free list or by taking a fresh cell).
+  // recycling from the free list or by taking a fresh cell). May return
+  // nullptr if the hash table has run out of memory.
   Cell* GetFreeCell() {
     // First try to re-use a cell from the free list.
     if (free_list_) {
@@ -184,26 +197,14 @@ class FixedHashMap {
       return cell;
     }
 
-    // Otherwise pick the next cell that has not been touched before.
-    size_t idx = next_unused_cell_;
-    next_unused_cell_++;
-
     // If the hash table has too little capacity (when too little address space
-    // was reserved for |cells_|), |next_unused_cell_| can be an index outside
-    // of the allocated storage. A guard page is allocated there to crash the
-    // program in that case. There are alternative solutions:
-    // - Deal with it, increase capacity by reallocating |cells_|.
-    // - Refuse to insert and let the caller deal with it.
-    // Because free cells are re-used before accessing fresh cells with a higher
-    // index, and because reserving address space without touching it is cheap,
-    // the simplest solution is to just allocate a humongous chunk of address
-    // space.
-
-    CHECK_LT(next_unused_cell_, num_cells_ + 1)
-        << "Allocation Register hash table has too little capacity. Increase "
-           "the capacity to run heap profiler in large sessions.";
-
-    return &cells_[idx];
+    // was reserved for |cells_|), return nullptr.
+    if (next_unused_cell_ >= num_cells_) {
+      return nullptr;
+    }
+
+    // Otherwise pick the next cell that has not been touched before.
+    return &cells_[next_unused_cell_++];
   }
 
   // Returns a value in the range [0, NumBuckets - 1] (inclusive).
@@ -219,6 +220,9 @@ class FixedHashMap {
   // Number of cells.
   size_t const num_cells_;
 
+  // Number of calls to Insert() that were lost because the hashtable was full.
+  size_t num_inserts_dropped_;
+
   // The array of cells. This array is backed by mmapped memory. Lower indices
   // are accessed first, higher indices are accessed only when the |free_list_|
   // is empty. This is to minimize the amount of resident memory used.
@@ -248,6 +252,8 @@ class TraceEventMemoryOverhead;
 // freed. Internally it has two hashtables: one for Backtraces and one for
 // actual allocations. Sizes of both hashtables are fixed, and this class
 // allocates (mmaps) only in its constructor.
+//
+// When either hash table hits max size, new inserts are dropped.
 class BASE_EXPORT AllocationRegister {
  public:
   // Details about an allocation.
@@ -282,7 +288,10 @@ class BASE_EXPORT AllocationRegister {
 
   // Inserts allocation details into the table. If the address was present
   // already, its details are updated. |address| must not be null.
-  void Insert(const void* address,
+  //
+  // Returns true if an insert occurred. Inserts may fail because the table
+  // is full.
+  bool Insert(const void* address,
               size_t size,
               const AllocationContext& context);
 
@@ -359,6 +368,14 @@ class BASE_EXPORT AllocationRegister {
   AllocationMap allocations_;
   BacktraceMap backtraces_;
 
+  // Sentinel used when the |backtraces_| table is full.
+  //
+  // This is a slight abstraction violation that allows for constant
+  // propagation. It relies on the sentinel being the first item inserted
+  // into the table, so that the first index returned will be 0. The
+  // constructor DCHECKs this assumption.
+  enum : BacktraceMap::KVIndex { kOutOfStorageBacktraceIndex = 0 };
+
   DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
 };
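
Note: the failure path this patch introduces can be illustrated in
isolation. The sketch below is hypothetical (FixedStore and its members
are illustration names, not the Chromium API); it mirrors the new
contract of returning {kInvalidKVIndex, false} and bumping a saturating
drop counter once the fixed-size storage is exhausted, instead of
crashing on a guard page:

#include <cstddef>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>

// Hypothetical stand-in for FixedHashMap: fixed capacity, and Insert()
// reports failure instead of aborting when the cells run out.
class FixedStore {
 public:
  using KVIndex = size_t;
  enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) };

  explicit FixedStore(size_t capacity) : cells_(capacity) {}

  // Returns {index, true} when the value was stored, or
  // {kInvalidKVIndex, false} when the table is full.
  std::pair<KVIndex, bool> Insert(int value) {
    if (next_unused_cell_ >= cells_.size()) {
      // Saturate rather than overflow, as num_inserts_dropped_ does.
      if (num_inserts_dropped_ < std::numeric_limits<size_t>::max())
        ++num_inserts_dropped_;
      return {kInvalidKVIndex, false};
    }
    KVIndex idx = next_unused_cell_++;
    cells_[idx] = value;
    return {idx, true};
  }

  size_t num_inserts_dropped() const { return num_inserts_dropped_; }

 private:
  std::vector<int> cells_;
  size_t next_unused_cell_ = 0;
  size_t num_inserts_dropped_ = 0;
};

int main() {
  FixedStore store(2);
  for (int v : {10, 20, 30}) {
    auto result = store.Insert(v);
    std::cout << "insert " << v << ": "
              << (result.second ? "ok" : "dropped") << "\n";
  }
  std::cout << "dropped: " << store.num_inserts_dropped() << "\n";  // 1
}

In the patched header the caller of FixedHashMap::Insert() is
AllocationRegister::Insert(), which now returns bool; when the
|backtraces_| table fills up, the register falls back to the
kOutOfStorageBacktraceIndex sentinel defined above so allocations can
still be recorded.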